[PATCH 17/18] KVM: arm64: nv: Filter out unsupported features from ID regs

Marc Zyngier <maz@kernel.org>
Thu Feb 9 09:58:19 PST 2023


As there are a number of features that we either can't support,
or don't want to support right away with NV, let's add some
basic filtering so that we don't advertise silly things to the
EL2 guest.

Whilst we are at it, advertise FEAT_TTL as well as FEAT_GTG, which
the NV implementation will support.

Reviewed-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
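
A quick, self-contained illustration of the mask-and-cap pattern nested.c
uses on the ID registers: clear the fields we don't emulate, then clamp
whatever is left to what NV can actually provide. This is a user-space
sketch only; field_get()/field_prep() stand in for the kernel's
FIELD_GET()/FIELD_PREP(), and the starting PARange value is made up:

#include <stdint.h>
#include <stdio.h>

/* ID_AA64MMFR0_EL1.PARange lives in bits [3:0] */
#define PARANGE_MASK    0xfULL

/* Stand-ins for the kernel's FIELD_GET()/FIELD_PREP() */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
        return (reg & mask) / (mask & -mask);
}

static uint64_t field_prep(uint64_t mask, uint64_t val)
{
        return (val * (mask & -mask)) & mask;
}

int main(void)
{
        /* Pretend the HW reports 52bit PAs (PARange == 0b0110) */
        uint64_t val = field_prep(PARANGE_MASK, 0b0110);

        /* Cap PARange to 48bit (0b0101), as nested.c does */
        if (field_get(PARANGE_MASK, val) > 0b0101) {
                val &= ~PARANGE_MASK;
                val |= field_prep(PARANGE_MASK, 0b0101);
        }

        printf("sanitised PARange: 0x%llx\n",
               (unsigned long long)field_get(PARANGE_MASK, val));
        return 0;
}
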
 arch/arm64/include/asm/kvm_nested.h |   6 ++
 arch/arm64/kvm/Makefile             |   2 +-
 arch/arm64/kvm/nested.c             | 162 ++++++++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c           |   3 +
 4 files changed, 172 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kvm/nested.c
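
Similarly, the FEAT_GTG handling below may be easier to follow in
isolation: the stage-2 granule fields (TGranX_2) advertised to the guest
are derived from the host page size, since KVM cannot shadow a stage-2
with a granule smaller than its own. The gtg_for_host() helper here is
purely illustrative (it is not part of the patch), but it mirrors what
the two switch statements in nested.c end up writing:

#include <stdio.h>

#define SZ_4K   0x1000UL
#define SZ_16K  0x4000UL
#define SZ_64K  0x10000UL

/* ID_AA64MMFR0_EL1.TGranX_2 field encodings */
#define TGRAN_2_NI      0b0001  /* not supported at stage 2 */
#define TGRAN_2_IMP     0b0010  /* supported at stage 2 */

static void gtg_for_host(unsigned long page_size, unsigned int *t4_2,
                         unsigned int *t16_2, unsigned int *t64_2)
{
        /* A granule smaller than the host's cannot back a shadow S2 */
        *t4_2  = (page_size <= SZ_4K)  ? TGRAN_2_IMP : TGRAN_2_NI;
        *t16_2 = (page_size <= SZ_16K) ? TGRAN_2_IMP : TGRAN_2_NI;
        *t64_2 = (page_size <= SZ_64K) ? TGRAN_2_IMP : TGRAN_2_NI;
}

int main(void)
{
        unsigned long sizes[] = { SZ_4K, SZ_16K, SZ_64K };

        for (int i = 0; i < 3; i++) {
                unsigned int t4, t16, t64;

                gtg_for_host(sizes[i], &t4, &t16, &t64);
                printf("host %3luK pages: TGran4_2=%u TGran16_2=%u TGran64_2=%u\n",
                       sizes[i] >> 10, t4, t16, t64);
        }
        return 0;
}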

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index fd601ea68d13..8fb67f032fd1 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -11,4 +11,10 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 		test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
 }
 
+struct sys_reg_params;
+struct sys_reg_desc;
+
+void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r);
+
 #endif /* __ARM64_KVM_NESTED_H */
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 31b07f2b2186..c0c050e53157 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -14,7 +14,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 inject_fault.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o stacktrace.o \
 	 vgic-sys-reg-v3.o fpsimd.o pkvm.o \
-	 arch_timer.o trng.o vmid.o emulate-nested.o \
+	 arch_timer.o trng.o vmid.o emulate-nested.o nested.o \
 	 vgic/vgic.o vgic/vgic-init.o \
 	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
 	 vgic/vgic-v3.o vgic/vgic-v4.o \
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
new file mode 100644
index 000000000000..f7ec27c27a4f
--- /dev/null
+++ b/arch/arm64/kvm/nested.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 - Columbia University and Linaro Ltd.
+ * Author: Jintack Lim <jintack.lim@linaro.org>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
+#include <asm/sysreg.h>
+
+#include "sys_regs.h"
+
+/* Protection against the sysreg repainting madness... */
+#define NV_FTR(r, f)		ID_AA64##r##_EL1_##f
+
+/*
+ * Our emulated CPU doesn't support all the possible features. For the
+ * sake of simplicity (and probably mental sanity), wipe out a number
+ * of feature bits we don't intend to support for the time being.
+ * This list should get updated as new features get added to the NV
+ * support, and new extensions to the architecture.
+ */
+void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u64 val, tmp;
+
+	val = p->regval;
+
+	switch (id) {
+	case SYS_ID_AA64ISAR0_EL1:
+		/* Support everything but TME, O.S. and Range TLBIs */
+		val &= ~(NV_FTR(ISAR0, TLB)		|
+			 NV_FTR(ISAR0, TME));
+		break;
+
+	case SYS_ID_AA64ISAR1_EL1:
+		/* Support everything but PtrAuth and Spec Invalidation */
+		val &= ~(GENMASK_ULL(63, 56)	|
+			 NV_FTR(ISAR1, SPECRES)	|
+			 NV_FTR(ISAR1, GPI)	|
+			 NV_FTR(ISAR1, GPA)	|
+			 NV_FTR(ISAR1, API)	|
+			 NV_FTR(ISAR1, APA));
+		break;
+
+	case SYS_ID_AA64PFR0_EL1:
+		/* No AMU, MPAM, S-EL2, RAS or SVE */
+		val &= ~(GENMASK_ULL(55, 52)	|
+			 NV_FTR(PFR0, AMU)	|
+			 NV_FTR(PFR0, MPAM)	|
+			 NV_FTR(PFR0, SEL2)	|
+			 NV_FTR(PFR0, RAS)	|
+			 NV_FTR(PFR0, SVE)	|
+			 NV_FTR(PFR0, EL3)	|
+			 NV_FTR(PFR0, EL2)	|
+			 NV_FTR(PFR0, EL1));
+		/* 64bit EL1/EL2/EL3 only */
+		val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
+		val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
+		val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
+		break;
+
+	case SYS_ID_AA64PFR1_EL1:
+		/* Only support SSBS */
+		val &= NV_FTR(PFR1, SSBS);
+		break;
+
+	case SYS_ID_AA64MMFR0_EL1:
+		/* Hide ECV, FGT, ExS, Secure Memory */
+		val &= ~(GENMASK_ULL(63, 43)		|
+			 NV_FTR(MMFR0, TGRAN4_2)	|
+			 NV_FTR(MMFR0, TGRAN16_2)	|
+			 NV_FTR(MMFR0, TGRAN64_2)	|
+			 NV_FTR(MMFR0, SNSMEM));
+
+		/* Disallow unsupported S2 page sizes */
+		switch (PAGE_SIZE) {
+		case SZ_64K:
+			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
+			fallthrough;
+		case SZ_16K:
+			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
+			fallthrough;
+		case SZ_4K:
+			/* Support everything */
+			break;
+		}
+		/*
+		 * Since we can't support a guest S2 page size smaller than
+		 * the host's own page size (due to KVM only populating its
+		 * own S2 using the kernel's page size), advertise the
+		 * limitation using FEAT_GTG.
+		 */
+		switch (PAGE_SIZE) {
+		case SZ_4K:
+			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
+			fallthrough;
+		case SZ_16K:
+			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
+			fallthrough;
+		case SZ_64K:
+			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
+			break;
+		}
+		/* Cap PARange to 48bits */
+		tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
+		if (tmp > 0b0101) {
+			val &= ~NV_FTR(MMFR0, PARANGE);
+			val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
+		}
+		break;
+
+	case SYS_ID_AA64MMFR1_EL1:
+		val &= (NV_FTR(MMFR1, PAN)	|
+			NV_FTR(MMFR1, LO)	|
+			NV_FTR(MMFR1, HPDS)	|
+			NV_FTR(MMFR1, VH)	|
+			NV_FTR(MMFR1, VMIDBits));
+		break;
+
+	case SYS_ID_AA64MMFR2_EL1:
+		val &= ~(NV_FTR(MMFR2, EVT)	|
+			 NV_FTR(MMFR2, BBM)	|
+			 NV_FTR(MMFR2, TTL)	|
+			 GENMASK_ULL(47, 44)	|
+			 NV_FTR(MMFR2, ST)	|
+			 NV_FTR(MMFR2, CCIDX)	|
+			 NV_FTR(MMFR2, VARange));
+
+		/* Force TTL support */
+		val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
+		break;
+
+	case SYS_ID_AA64DFR0_EL1:
+		/* Only limited support for PMU, Debug, BPs and WPs */
+		val &= (NV_FTR(DFR0, PMUVer)	|
+			NV_FTR(DFR0, WRPs)	|
+			NV_FTR(DFR0, BRPs)	|
+			NV_FTR(DFR0, DebugVer));
+
+		/* Cap Debug to ARMv8.1 */
+		tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
+		if (tmp > 0b0111) {
+			val &= ~NV_FTR(DFR0, DebugVer);
+			val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
+		}
+		break;
+
+	default:
+		/* Unknown register, just wipe it clean */
+		val = 0;
+		break;
+	}
+
+	p->regval = val;
+}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f5dd4f4eaaf0..82c1f8d786f7 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1223,6 +1223,9 @@ static bool access_id_reg(struct kvm_vcpu *vcpu,
 		return write_to_read_only(vcpu, p, r);
 
 	p->regval = read_id_reg(vcpu, r);
+	if (vcpu_has_nv(vcpu))
+		access_nested_id_reg(vcpu, p, r);
+
 	return true;
 }
 
-- 
2.34.1



