Patch "KVM: arm64: Calculate cptr_el2 traps on activating traps" has been added to the 6.13-stable tree
gregkh at linuxfoundation.org
Mon Mar 24 11:56:28 PDT 2025
This is a note to let you know that I've just added the patch titled

    KVM: arm64: Calculate cptr_el2 traps on activating traps

to the 6.13-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
and it can be found in the queue-6.13 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable at vger.kernel.org> know about it.
From stable+bounces-125698-greg=kroah.com at vger.kernel.org Thu Mar 20 17:11:15 2025
From: Mark Brown <broonie at kernel.org>
Date: Fri, 21 Mar 2025 00:10:10 +0000
Subject: KVM: arm64: Calculate cptr_el2 traps on activating traps
To: Greg Kroah-Hartman <gregkh at linuxfoundation.org>, Marc Zyngier <maz at kernel.org>, Oliver Upton <oliver.upton at linux.dev>, Joey Gouly <joey.gouly at arm.com>, Suzuki K Poulose <suzuki.poulose at arm.com>, Catalin Marinas <catalin.marinas at arm.com>, Will Deacon <will at kernel.org>
Cc: linux-arm-kernel at lists.infradead.org, kvmarm at lists.linux.dev, linux-kernel at vger.kernel.org, stable at vger.kernel.org, Mark Brown <broonie at kernel.org>, Fuad Tabba <tabba at google.com>, James Clark <james.clark at linaro.org>
Message-ID: <20250321-stable-sve-6-13-v2-1-3150e3370c40 at kernel.org>
From: Fuad Tabba <tabba at google.com>
[ Upstream commit 2fd5b4b0e7b440602455b79977bfa64dea101e6c ]
Similar to VHE, calculate the value of cptr_el2 from scratch when
activating traps. This removes the need to store cptr_el2 in every
vcpu structure. Moreover, some traps, such as those that depend on
whether the guest owns the FP registers, must be set on every vcpu run.
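
For illustration, the recomputation performed by the new
__activate_cptr_traps() helper (see the switch.c hunk below) can be
modelled as a stand-alone C program. The bit positions and the
has_hvhe()/guest_owns_fp_regs()/vcpu_has_sve() stubs here are
placeholders invented for this sketch, not the kernel's real
definitions, which live in the arm64 KVM headers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in bit positions, NOT the architectural ones. */
	#define CPTR_EL2_TAM        (1ULL << 0)
	#define CPTR_EL2_TTA        (1ULL << 1)
	#define CPTR_EL2_TSM        (1ULL << 2)
	#define CPTR_EL2_TZ         (1ULL << 3)
	#define CPTR_EL2_TFP        (1ULL << 4)
	#define CPTR_NVHE_EL2_RES1  (1ULL << 5)
	#define CPACR_ELx_TTA       (1ULL << 6)
	#define CPACR_ELx_FPEN      (1ULL << 7)
	#define CPACR_ELx_ZEN       (1ULL << 8)

	/* Stubs; in the kernel these query CPU features and vcpu state. */
	static bool has_hvhe(void)           { return false; }
	static bool guest_owns_fp_regs(void) { return false; }
	static bool vcpu_has_sve(void)       { return false; }

	/*
	 * Mirrors the control flow of __activate_cptr_traps(): the value
	 * is rebuilt from scratch on every vcpu run instead of being
	 * cached in vcpu->arch.cptr_el2.
	 */
	static uint64_t compute_cptr_el2(void)
	{
		uint64_t val = CPTR_EL2_TAM; /* same bit irrespective of E2H */

		if (has_hvhe()) {
			val |= CPACR_ELx_TTA;
			if (guest_owns_fp_regs()) {
				val |= CPACR_ELx_FPEN;
				if (vcpu_has_sve())
					val |= CPACR_ELx_ZEN;
			}
		} else {
			/* TSM is always set: SME is unsupported in KVM,
			 * and the bit is RES1 when SME isn't implemented. */
			val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1 | CPTR_EL2_TSM;
			if (!vcpu_has_sve() || !guest_owns_fp_regs())
				val |= CPTR_EL2_TZ;
			if (!guest_owns_fp_regs())
				val |= CPTR_EL2_TFP;
		}
		return val;
	}

	int main(void)
	{
		printf("cptr_el2 = %#llx\n",
		       (unsigned long long)compute_cptr_el2());
		return 0;
	}

With all three stubs returning false, this prints the fully-trapping
nVHE value; flipping the stubs exercises the same branches the patch
takes on a real vcpu run.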
Reported-by: James Clark <james.clark at linaro.org>
Fixes: 5294afdbf45a ("KVM: arm64: Exclude FP ownership from kvm_vcpu_arch")
Signed-off-by: Fuad Tabba <tabba at google.com>
Link: https://lore.kernel.org/r/20241216105057.579031-13-tabba@google.com
Signed-off-by: Marc Zyngier <maz at kernel.org>
Signed-off-by: Mark Brown <broonie at kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
---
 arch/arm64/include/asm/kvm_host.h |    1
 arch/arm64/kvm/arm.c              |    1
 arch/arm64/kvm/hyp/nvhe/pkvm.c    |   30 ----------------------
 arch/arm64/kvm/hyp/nvhe/switch.c  |   51 +++++++++++++++++++++++---------------
 4 files changed, 32 insertions(+), 51 deletions(-)
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -708,7 +708,6 @@ struct kvm_vcpu_arch {
u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2;
- u64 cptr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1569,7 +1569,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init
}
vcpu_reset_hcr(vcpu);
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
/*
* Handle the "start in power-off" case.
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -31,8 +31,6 @@ static void pvm_init_traps_aa64pfr0(stru
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
u64 hcr_set = HCR_RW;
u64 hcr_clear = 0;
- u64 cptr_set = 0;
- u64 cptr_clear = 0;
/* Protected KVM does not support AArch32 guests. */
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -62,21 +60,10 @@ static void pvm_init_traps_aa64pfr0(stru
/* Trap AMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
hcr_clear |= HCR_AMVOFFEN;
- cptr_set |= CPTR_EL2_TAM;
- }
-
- /* Trap SVE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
- if (has_hvhe())
- cptr_clear |= CPACR_ELx_ZEN;
- else
- cptr_set |= CPTR_EL2_TZ;
}
vcpu->arch.hcr_el2 |= hcr_set;
vcpu->arch.hcr_el2 &= ~hcr_clear;
- vcpu->arch.cptr_el2 |= cptr_set;
- vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/*
@@ -106,7 +93,6 @@ static void pvm_init_traps_aa64dfr0(stru
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
u64 mdcr_set = 0;
u64 mdcr_clear = 0;
- u64 cptr_set = 0;
/* Trap/constrain PMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
@@ -133,21 +119,12 @@ static void pvm_init_traps_aa64dfr0(stru
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
mdcr_set |= MDCR_EL2_TTRF;
- /* Trap Trace */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
- if (has_hvhe())
- cptr_set |= CPACR_EL1_TTA;
- else
- cptr_set |= CPTR_EL2_TTA;
- }
-
/* Trap External Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
mdcr_clear |= MDCR_EL2_E2TB_MASK;
vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
- vcpu->arch.cptr_el2 |= cptr_set;
}
/*
@@ -198,10 +175,6 @@ static void pvm_init_trap_regs(struct kv
/* Clear res0 and set res1 bits to trap potential new features. */
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
- if (!has_hvhe()) {
- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
- }
}
static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
@@ -236,7 +209,6 @@ static void pkvm_vcpu_reset_hcr(struct k
*/
static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
vcpu->arch.mdcr_el2 = 0;
pkvm_vcpu_reset_hcr(vcpu);
@@ -693,8 +665,6 @@ unlock:
return ret;
}
- hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
return 0;
}
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -36,33 +36,46 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_ve
extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
-static void __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
- ___activate_traps(vcpu, vcpu->arch.hcr_el2);
- __activate_traps_common(vcpu);
+ if (has_hvhe()) {
+ val |= CPACR_ELx_TTA;
- val = vcpu->arch.cptr_el2;
- val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
- val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
- if (cpus_have_final_cap(ARM64_SME)) {
- if (has_hvhe())
- val &= ~CPACR_ELx_SMEN;
- else
- val |= CPTR_EL2_TSM;
- }
+ if (guest_owns_fp_regs()) {
+ val |= CPACR_ELx_FPEN;
+ if (vcpu_has_sve(vcpu))
+ val |= CPACR_ELx_ZEN;
+ }
+ } else {
+ val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
+
+ /*
+ * Always trap SME since it's not supported in KVM.
+ * TSM is RES1 if SME isn't implemented.
+ */
+ val |= CPTR_EL2_TSM;
- if (!guest_owns_fp_regs()) {
- if (has_hvhe())
- val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN);
- else
- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
+ if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
+ val |= CPTR_EL2_TZ;
- __activate_traps_fpsimd32(vcpu);
+ if (!guest_owns_fp_regs())
+ val |= CPTR_EL2_TFP;
}
+ if (!guest_owns_fp_regs())
+ __activate_traps_fpsimd32(vcpu);
+
kvm_write_cptr_el2(val);
+}
+
+static void __activate_traps(struct kvm_vcpu *vcpu)
+{
+ ___activate_traps(vcpu, vcpu->arch.hcr_el2);
+ __activate_traps_common(vcpu);
+ __activate_cptr_traps(vcpu);
+
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
Patches currently in stable-queue which might be from broonie at kernel.org are
queue-6.13/kvm-arm64-calculate-cptr_el2-traps-on-activating-traps.patch
queue-6.13/regulator-check-that-dummy-regulator-has-been-probed-before-using-it.patch
queue-6.13/kvm-arm64-eagerly-switch-zcr_el-1-2.patch
queue-6.13/kvm-arm64-mark-some-header-functions-as-inline.patch
queue-6.13/kvm-arm64-remove-host-fpsimd-saving-for-non-protected-kvm.patch
queue-6.13/regulator-dummy-force-synchronous-probing.patch
queue-6.13/kvm-arm64-refactor-exit-handlers.patch
queue-6.13/kvm-arm64-unconditionally-save-flush-host-fpsimd-sve-sme-state.patch
queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.smen.patch
queue-6.13/kvm-arm64-remove-vhe-host-restore-of-cpacr_el1.zen.patch