[PATCH v8 25/38] KVM: arm64: Trap SME usage in guest
Mark Brown
broonie at kernel.org
Mon Jan 24 16:11:01 PST 2022
SME defines two new traps which need to be enabled for guests to ensure
that they can't use SME: one for the main SME operations, which mirrors
the traps for SVE, and another for access to TPIDR2.
For VHE manage SMEN along with ZEN in __activate_traps() and the FP state
management callbacks, and handle TPIDR2 by clearing SCTLR_EL2.EnTP2 in
__activate_traps() and setting it again in __deactivate_traps(); there is
no existing dynamic management of SCTLR_EL2.
For nVHE the CPTR_EL2 value used while the guest runs is built from
vcpu->arch.cptr_el2 in __activate_traps(); set TSM there when SME is
present so that guest SME instructions trap, and make sure it is clear
again in the CPTR_EL2_DEFAULT based value written back for the host in
__deactivate_traps().
For nVHE also use the fine grained traps where available: clear the
nTPIDR2_EL0 and nSMPRI_EL1 bits in HFGRTR_EL2 and HFGWTR_EL2 while the
guest is running so that accesses to TPIDR2_EL0 and SMPRI_EL1 trap, and
set them again for the host in __deactivate_traps().
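For reference, the trap configuration this adds up to while a guest is
running is roughly the following (an illustrative sketch reusing the
definitions from the hunks below, with the capability checks omitted;
it is not additional code in the patch):

	/* VHE: trap SME instructions and TPIDR2_EL0 accesses */
	write_sysreg(read_sysreg(cpacr_el1) & ~CPACR_EL1_SMEN, cpacr_el1);
	write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2, sctlr_el2);

	/* nVHE: trap SME instructions via CPTR_EL2, and TPIDR2_EL0/
	 * SMPRI_EL1 accesses via the fine grained traps */
	write_sysreg(read_sysreg(cptr_el2) | CPTR_EL2_TSM, cptr_el2);
	write_sysreg_s(read_sysreg_s(SYS_HFGRTR_EL2) &
		       ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK), SYS_HFGRTR_EL2);
	write_sysreg_s(read_sysreg_s(SYS_HFGWTR_EL2) &
		       ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK), SYS_HFGWTR_EL2);

Everything is restored to the non-trapping values for the host in
__deactivate_traps().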
Signed-off-by: Mark Brown <broonie at kernel.org>
---
arch/arm64/kvm/hyp/nvhe/switch.c | 30 ++++++++++++++++++++++++++++++
arch/arm64/kvm/hyp/vhe/switch.c | 10 +++++++++-
2 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6410d21d8695..184bf6bd79b9 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -47,10 +47,25 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
+ cpus_have_final_cap(ARM64_HAS_FGT)) {
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -94,9 +109,24 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME) &&
+ cpus_have_final_cap(ARM64_HAS_FGT)) {
+ u64 val;
+
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
cptr = CPTR_EL2_DEFAULT;
if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
cptr |= CPTR_EL2_TZ;
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ cptr &= ~CPTR_EL2_TSM;
write_sysreg(cptr, cptr_el2);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 11d053fdd604..f5630579f577 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -38,7 +38,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~CPACR_EL1_ZEN;
+ val &= ~(CPACR_EL1_ZEN | CPACR_EL1_SMEN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -59,6 +59,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(val, cpacr_el1);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -80,6 +84,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+ if (IS_ENABLED(CONFIG_ARM64_SME) && cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
--
2.30.2