[RFC] KVM: arm64: Support FEAT_CCIDX

Shaokun Zhang zhangshaokun at hisilicon.com
Sat Apr 10 09:16:37 BST 2021


CCSIDR_EL1 can be implemented in the 64-bit format when FEAT_CCIDX is
present, as indicated by the CCIDX field in ID_AA64MMFR2_EL1. In the
64-bit format, the NumSets and Associativity fields are wider and sit
at different bit positions than in the 32-bit format. Let's support
this feature.

Cc: Marc Zyngier <maz at kernel.org>
Cc: James Morse <james.morse at arm.com>
Cc: Alexandru Elisei <alexandru.elisei at arm.com>
Cc: Suzuki K Poulose <suzuki.poulose at arm.com>
Signed-off-by: Shaokun Zhang <zhangshaokun at hisilicon.com>
---
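A note on the two CCSIDR_EL1 layouts (for reference, not part of the
patch): without FEAT_CCIDX, LineSize lives in bits [2:0], Associativity
in [12:3] and NumSets in [27:13]; with FEAT_CCIDX, LineSize stays in
[2:0] while Associativity widens to [23:3] and NumSets moves to
[55:32]. A minimal decode sketch, where decode_ccsidr() is a
hypothetical helper used purely for illustration:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only: decode CCSIDR_EL1 in either format. */
static void decode_ccsidr(u64 ccsidr, bool ccidx)
{
	u32 line_size, assoc, num_sets;

	/* LineSize encodes log2(bytes per line) - 4 in both formats. */
	line_size = FIELD_GET(GENMASK(2, 0), ccsidr);

	if (ccidx) {
		/* 64-bit format */
		assoc = FIELD_GET(GENMASK(23, 3), ccsidr);
		num_sets = FIELD_GET(GENMASK_ULL(55, 32), ccsidr);
	} else {
		/* 32-bit format */
		assoc = FIELD_GET(GENMASK(12, 3), ccsidr);
		num_sets = FIELD_GET(GENMASK(27, 13), ccsidr);
	}

	/* Associativity and NumSets both encode (value - 1). */
	pr_info("LineSize=%u Assoc=%u NumSets=%u\n",
		line_size, assoc + 1, num_sets + 1);
}
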
 arch/arm64/kvm/sys_regs.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
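
The demux_c15_get()/demux_c15_set() changes below imply that the CCSIDR
demux registers are transferred to/from userspace as 64-bit values. A
userspace sketch of a read, assuming the registers end up encoded with
KVM_REG_SIZE_U64 (today's ABI exposes them as KVM_REG_SIZE_U32, so the
register ID below is an assumption about the eventual ABI, not the
current one):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Illustrative only: fetch CCSIDR for a given CSSELR selector via the
 * demux interface, assuming a KVM_REG_SIZE_U64 encoding.
 */
static int read_ccsidr(int vcpu_fd, uint32_t csselr, uint64_t *ccsidr)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_DEMUX |
		      KVM_REG_ARM_DEMUX_ID_CCSIDR |
		      (csselr << KVM_REG_ARM_DEMUX_VAL_SHIFT),
		.addr = (uint64_t)(uintptr_t)ccsidr,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

On failure the ioctl() returns -1 with errno set; an invalid cache
selector would be reported as ENOENT, matching demux_c15_get().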

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 52fdb9a015a4..0dc822cef20b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -18,6 +18,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
@@ -95,9 +96,9 @@ static u32 cache_levels;
 #define CSSELR_MAX 14
 
 /* Which cache CCSIDR represents depends on CSSELR value. */
-static u32 get_ccsidr(u32 csselr)
+static u64 get_ccsidr(u32 csselr)
 {
-	u32 ccsidr;
+	u64 ccsidr;
 
 	/* Make sure noone else changes CSSELR during this! */
 	local_irq_disable();
@@ -1275,12 +1276,16 @@ static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	u32 csselr;
+	u32 csselr, ccidx;
+	u64 mmfr2;
 
 	if (p->is_write)
 		return write_to_read_only(vcpu, p, r);
 
 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
+	mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+	ccidx = cpuid_feature_extract_unsigned_field(mmfr2,
+						     ID_AA64MMFR2_CCIDX_SHIFT);
 	p->regval = get_ccsidr(csselr);
 
 	/*
@@ -1295,8 +1300,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	 * geometry (which is not permitted by the architecture), they would
 	 * only do so for virtually indexed caches.]
 	 */
-	if (!(csselr & 1)) // data or unified cache
-		p->regval &= ~GENMASK(27, 3);
+	if (!(csselr & 1)) { // data or unified cache
+		if (ccidx)
+			p->regval &= ~(GENMASK(23, 3) | GENMASK(55, 32));
+		else
+			p->regval &= ~GENMASK(27, 3);
+	}
+
 	return true;
 }
 
@@ -2521,7 +2531,7 @@ static bool is_valid_cache(u32 val)
 static int demux_c15_get(u64 id, void __user *uaddr)
 {
 	u32 val;
-	u32 __user *uval = uaddr;
+	u64 __user *uval = uaddr;
 
 	/* Fail if we have unknown bits set. */
 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
@@ -2545,8 +2555,9 @@ static int demux_c15_get(u64 id, void __user *uaddr)
 
 static int demux_c15_set(u64 id, void __user *uaddr)
 {
-	u32 val, newval;
-	u32 __user *uval = uaddr;
+	u32 val;
+	u64 newval;
+	u64 __user *uval = uaddr;
 
 	/* Fail if we have unknown bits set. */
 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
-- 
2.7.4