[PATCH v10 52/59] KVM: arm64: nv: Fold GICv3 host trapping requirements into guest setup
Marc Zyngier
maz at kernel.org
Mon May 15 10:30:56 PDT 2023
Popular HW that is able to use NV also has a broken vgic implementation
that requires trapping.
On such HW, propagate the host trap bits into the guest's shadow
ICH_HCR_EL2 register, making sure we don't allow an L2 guest to bring
the system down.
This involves a bit of tweaking so that the emulation code correctly
picks up the shadow state as needed, and so that only the EOIcount
field of ICH_HCR_EL2 is synced back into the guest state.
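For readers following along, here is a minimal, standalone sketch of
the two bit-level operations this boils down to: folding the host's
trap bits into the shadow ICH_HCR_EL2 on load, and merging only
EOIcount back on put. The mask values are copied from
include/linux/irqchip/arm-gic-v3.h; everything else (main, host_hcr,
guest_hcr, cpuif_trap, the fake EOI increment) is a made-up stand-in
for the real vgic state, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define ICH_HCR_TC             (1ULL << 10)
#define ICH_HCR_TALL0          (1ULL << 11)
#define ICH_HCR_TALL1          (1ULL << 12)
#define ICH_HCR_TDIR           (1ULL << 14)
#define ICH_HCR_EOIcount_SHIFT 27
#define ICH_HCR_EOIcount_MASK  (0x1fULL << ICH_HCR_EOIcount_SHIFT)

int main(void)
{
	bool cpuif_trap = true;            /* stand-in for vgic_v3_cpuif_trap */
	uint64_t host_hcr  = ICH_HCR_TALL0 | ICH_HCR_TALL1 | ICH_HCR_TC;
	uint64_t guest_hcr = ICH_HCR_TDIR; /* what L1 wrote to ICH_HCR_EL2 */

	/* On load: fold the host trap bits into the shadow register */
	uint64_t shadow = guest_hcr;
	if (cpuif_trap)
		shadow |= host_hcr & (ICH_HCR_TALL0 | ICH_HCR_TALL1 |
				      ICH_HCR_TC | ICH_HCR_TDIR);

	/* ...L2 runs, and HW bumps EOIcount in the shadow register... */
	shadow += 2ULL << ICH_HCR_EOIcount_SHIFT;

	/* On put: sync only EOIcount back, never the folded trap bits */
	guest_hcr &= ~ICH_HCR_EOIcount_MASK;
	guest_hcr |= shadow & ICH_HCR_EOIcount_MASK;

	printf("shadow=%#" PRIx64 " guest=%#" PRIx64 "\n", shadow, guest_hcr);
	return 0;
}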
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/kvm/hyp/vgic-v3-sr.c | 4 ++--
arch/arm64/kvm/vgic/vgic-v3-nested.c | 21 ++++++++++++++++++---
2 files changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 75152c1ce646..aaaea35099e5 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -484,7 +484,7 @@ static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
u64 *lr_val)
{
- unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+ unsigned int used_lrs = kern_hyp_va(vcpu->arch.vgic_cpu.current_cpu_if)->used_lrs;
u8 priority = GICv3_IDLE_PRIORITY;
int i, lr = -1;
@@ -523,7 +523,7 @@ static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
u64 *lr_val)
{
- unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+ unsigned int used_lrs = kern_hyp_va(vcpu->arch.vgic_cpu.current_cpu_if)->used_lrs;
int i;
for (i = 0; i < used_lrs; i++) {
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 12937bc86e1c..51f97bd4489d 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -149,9 +149,20 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
void vgic_v3_create_shadow_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.shadow_vgic_v3;
+ struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val = 0;
int i;
- cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
+ /*
+ * If we're on a system with a broken vgic that requires
+ * trapping, propagate the trapping requirements.
+ *
+ * Ah, the smell of rotten fruits...
+ */
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ val = host_if->vgic_hcr & (ICH_HCR_TALL0 | ICH_HCR_TALL1 |
+ ICH_HCR_TC | ICH_HCR_TDIR);
+ cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
for (i = 0; i < 4; i++) {
@@ -181,6 +192,7 @@ void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *s_cpu_if = vcpu_shadow_if(vcpu);
+ u64 val;
int i;
__vgic_v3_save_state(s_cpu_if);
@@ -189,7 +201,10 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
* Translate the shadow state HW fields back to the virtual ones
* before copying the shadow struct back to the nested one.
*/
- __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = s_cpu_if->vgic_hcr;
+ val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
+ val &= ~ICH_HCR_EOIcount_MASK;
+ val |= (s_cpu_if->vgic_hcr & ICH_HCR_EOIcount_MASK);
+ __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
__vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
for (i = 0; i < 4; i++) {
@@ -198,7 +213,7 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
}
for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
- u64 val = __vcpu_sys_reg(vcpu, ICH_LRN(i));
+ val = __vcpu_sys_reg(vcpu, ICH_LRN(i));
val &= ~ICH_LR_STATE;
val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
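A note on the vgic-v3-sr.c hunks above: used_lrs is now read through
the vcpu's current_cpu_if pointer (set up earlier in this series),
which designates either the host or the shadow cpu interface, and the
pointer has to go through kern_hyp_va() because the nVHE hyp runs
with its own VA mapping. The sketch below shows only the indirection
idea; the types, field names and helper are simplified stand-ins, not
the kernel's, and the VA translation is elided:

#include <stdio.h>

struct cpu_if { unsigned int used_lrs; };

struct vcpu {
	struct cpu_if host_if, shadow_if;
	struct cpu_if *current_cpu_if;
};

static unsigned int get_used_lrs(struct vcpu *v)
{
	/* in the kernel, this dereference goes via kern_hyp_va() */
	return v->current_cpu_if->used_lrs;
}

int main(void)
{
	struct vcpu v = { .host_if = { 4 }, .shadow_if = { 2 } };

	v.current_cpu_if = &v.host_if;   /* running L1 */
	printf("host: %u\n", get_used_lrs(&v));

	v.current_cpu_if = &v.shadow_if; /* running nested (L2) */
	printf("shadow: %u\n", get_used_lrs(&v));
	return 0;
}

The upside of this indirection is that the hyp LR-scanning code is
identical whether it is handling L1 or L2 state.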
--
2.34.1