[PATCH 63/89] KVM: arm64: Fix initializing traps in protected mode
Will Deacon
will@kernel.org
Thu May 19 06:41:38 PDT 2022
From: Fuad Tabba <tabba@google.com>
The values of the trapping registers for protected VMs should be
computed from the ground up and must not depend on potentially
preexisting values.

Moreover, non-protected VMs should not be restricted in protected
mode in the same manner as protected VMs.
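
As an illustration (a simplified sketch lifted from the diff below, not
additional code; HCR_GUEST_FLAGS and the HCR_T* trap bits are the ones
used there), the old protected-VM path only OR'ed the trap bits into
whatever hcr_el2 already held:

        /* old: result depends on a preexisting value */
        vcpu->arch.hcr_el2 |= HCR_TID3 | HCR_TACR | HCR_TIDCP | HCR_TID1;

whereas the new path rebuilds the register from a known base, so no
stale bits can leak through:

        /* new: computed from the ground up */
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS |
                             HCR_TID3 | HCR_TACR | HCR_TIDCP | HCR_TID1;
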
Signed-off-by: Fuad Tabba <tabba@google.com>
---
arch/arm64/kvm/hyp/nvhe/pkvm.c | 48 ++++++++++++++++++++++------------
1 file changed, 31 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 2c13ba0f2bf2..839506a546c7 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -168,34 +168,48 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
*/
static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
{
- const u64 hcr_trap_feat_regs = HCR_TID3;
- const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;
-
/*
* Always trap:
* - Feature id registers: to control features exposed to guests
* - Implementation-defined features
*/
- vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;
+ vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS |
+ HCR_TID3 | HCR_TACR | HCR_TIDCP | HCR_TID1;
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
+ /* route synchronous external abort exceptions to EL2 */
+ vcpu->arch.hcr_el2 |= HCR_TEA;
+ /* trap error record accesses */
+ vcpu->arch.hcr_el2 |= HCR_TERR;
+ }
- /* Clear res0 and set res1 bits to trap potential new features. */
- vcpu->arch.hcr_el2 &= ~(HCR_RES0);
- vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ vcpu->arch.hcr_el2 |= HCR_FWB;
+
+ if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE))
+ vcpu->arch.hcr_el2 |= HCR_TID2;
}
/*
* Initialize trap register values for protected VMs.
*/
-static void pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+static void pkvm_vcpu_init_traps(struct kvm_vcpu *shadow_vcpu, struct kvm_vcpu *host_vcpu)
{
- pvm_init_trap_regs(vcpu);
- pvm_init_traps_aa64pfr0(vcpu);
- pvm_init_traps_aa64pfr1(vcpu);
- pvm_init_traps_aa64dfr0(vcpu);
- pvm_init_traps_aa64mmfr0(vcpu);
- pvm_init_traps_aa64mmfr1(vcpu);
+ shadow_vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
+ shadow_vcpu->arch.mdcr_el2 = 0;
+
+ if (!vcpu_is_protected(shadow_vcpu)) {
+ shadow_vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS |
+ READ_ONCE(host_vcpu->arch.hcr_el2);
+ return;
+ }
+
+ pvm_init_trap_regs(shadow_vcpu);
+ pvm_init_traps_aa64pfr0(shadow_vcpu);
+ pvm_init_traps_aa64pfr1(shadow_vcpu);
+ pvm_init_traps_aa64dfr0(shadow_vcpu);
+ pvm_init_traps_aa64mmfr0(shadow_vcpu);
+ pvm_init_traps_aa64mmfr1(shadow_vcpu);
}
/*
@@ -364,7 +378,7 @@ static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
shadow_vcpu->arch.hw_mmu = &vm->kvm.arch.mmu;
- pkvm_vcpu_init_traps(shadow_vcpu);
+ pkvm_vcpu_init_traps(shadow_vcpu, host_vcpu);
}
return 0;
--
2.36.1.124.g0e6072fb45-goog