[PATCH v5 21/28] KVM: arm64: Flush register state on writes to SVCR.SM and SVCR.ZA
Mark Brown
broonie at kernel.org
Wed Apr 16 17:25:25 PDT 2025
Writes to the physical SVCR.SM and SVCR.ZA change the state of PSTATE.SM
and PSTATE.ZA, causing other floating point state to be reset. Emulate this
behaviour for writes done via the KVM userspace ABI.
Setting PSTATE.ZA to 1 causes ZA and ZT0 to be reset to 0; these are stored
in sme_state. Setting PSTATE.ZA to 0 causes ZA and ZT0 to become inaccessible,
so no reset is needed.
Any change in PSTATE.SM causes the V, Z, P, FFR and FPMR registers to be
reset to 0 and FPSR to be reset to 0x800009f.
Signed-off-by: Mark Brown <broonie at kernel.org>
---
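For context, a minimal userspace sketch (not part of the patch) of the write
path this emulates: a VMM setting SVCR through KVM_SET_ONE_REG. The helper
name and the vcpu_fd handle are illustrative; the register ID is built from
SVCR's sys_reg encoding (op0=3, op1=3, CRn=4, CRm=2, op2=2).

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in asm/kvm.h for ARM64_SYS_REG() on arm64 */

#define SVCR_REG_ID	ARM64_SYS_REG(3, 3, 4, 2, 2)	/* SVCR, 64-bit sysreg */

static int vcpu_set_svcr(int vcpu_fd, uint64_t svcr)
{
	struct kvm_one_reg reg = {
		.id   = SVCR_REG_ID,
		.addr = (uint64_t)&svcr,
	};

	/*
	 * With this patch, a 0->1 transition of SVCR.ZA zeroes the guest's
	 * ZA/ZT0 storage, and any change to SVCR.SM zeroes the V/Z/P/FFR
	 * registers and FPMR and sets FPSR to 0x800009f, mirroring writes
	 * to the physical register.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

After a write that flips SVCR.SM, userspace should expect the vector and
predicate registers and FPMR to read back as zero and FPSR as 0x800009f.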
arch/arm64/include/asm/kvm_host.h | 18 ++++++++++++++++++
arch/arm64/kvm/sys_regs.c | 29 ++++++++++++++++++++++++++++-
2 files changed, 46 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 09351c14032f..ef8b60222ac9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1066,6 +1066,24 @@ struct kvm_vcpu_arch {
__size_ret; \
})
+#define vcpu_sme_state(vcpu) (kern_hyp_va((vcpu)->arch.sme_state))
+
+#define vcpu_sme_state_size(vcpu) ({ \
+ size_t __size_ret; \
+ unsigned int __vcpu_vq; \
+ \
+ if (WARN_ON(!sve_vl_valid((vcpu)->arch.max_vl[ARM64_VEC_SME]))) { \
+ __size_ret = 0; \
+ } else { \
+ __vcpu_vq = vcpu_sme_max_vq(vcpu); \
+ __size_ret = ZA_SIG_REGS_SIZE(__vcpu_vq); \
+ if (vcpu_has_sme2(vcpu)) \
+ __size_ret += ZT_SIG_REG_SIZE; \
+ } \
+ \
+ __size_ret; \
+})
+
/*
* Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
* memory backed version of a register, and not the one most recently
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7cfc0c2801ed..0f08f791e3fe 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -806,6 +806,33 @@ static bool access_smidr(struct kvm_vcpu *vcpu,
return true;
}
+static int set_svcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ u64 val)
+{
+ u64 old = __vcpu_sys_reg(vcpu, rd->reg);
+
+ if (val & SVCR_RES0)
+ return -EINVAL;
+
+ if ((val & SVCR_ZA) && !(old & SVCR_ZA) && vcpu->arch.sme_state)
+ memset(vcpu->arch.sme_state, 0, vcpu_sme_state_size(vcpu));
+
+ if ((val & SVCR_SM) != (old & SVCR_SM)) {
+ memset(vcpu->arch.ctxt.fp_regs.vregs, 0,
+ sizeof(vcpu->arch.ctxt.fp_regs.vregs));
+
+ if (vcpu->arch.sve_state)
+ memset(vcpu->arch.sve_state, 0,
+ vcpu_sve_state_size(vcpu));
+
+ __vcpu_sys_reg(vcpu, FPMR) = 0;
+ vcpu->arch.ctxt.fp_regs.fpsr = 0x800009f;
+ }
+
+ __vcpu_sys_reg(vcpu, rd->reg) = val;
+ return 0;
+}
+
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
@@ -3115,7 +3142,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
CTR_EL0_DminLine_MASK |
CTR_EL0_L1Ip_MASK |
CTR_EL0_IminLine_MASK),
- { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
+ { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility, .set_user = set_svcr },
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
--
2.39.5