[PATCH v4 21/49] KVM: arm64: Revamp vgic maintenance interrupt configuration
Marc Zyngier
maz at kernel.org
Thu Nov 20 09:25:11 PST 2025
We currently don't use the maintenance interrupt very much, apart
from EOI on level interrupts and for LR underflow in limited cases.
However, as we are moving towards a setup where active interrupts
can live outside of the LRs, we need to use MIs in a more diverse
set of cases.

Add a new helper that produces a digest of the ap_list, and use
that summary to set the various control bits as required.

This slightly changes the way v2 SGIs are handled: they used to
count for more than one interrupt when sizing the ap_list against
the LRs, but they now count as a single entry.
Tested-by: Fuad Tabba <tabba at google.com>
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
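Note for reviewers: the short version of the new flow is "summarize
the ap_list once, then derive the HCR bits from the three predicates".
The userspace sketch below is purely illustrative: NR_LR, the HCR_*
bit values and the hand-filled summary are mocks standing in for
kvm_vgic_global_state.nr_lr and the GICH_HCR_*/ICH_HCR_EL2_*
definitions, not the kernel code itself.

/*
 * Minimal userspace sketch of the new MI configuration flow.
 * Everything here is mocked; only the decision logic mirrors
 * the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_LR		4	/* stand-in for kvm_vgic_global_state.nr_lr */

/* Mocked bit encodings, not the real register layout */
#define HCR_EN		(1U << 0)
#define HCR_UIE		(1U << 1)
#define HCR_LRENPIE	(1U << 2)
#define HCR_NPIE	(1U << 3)

struct ap_list_summary {
	unsigned int nr_pend;	/* purely pending, not active */
	unsigned int nr_act;	/* active, or active+pending */
	unsigned int nr_sgi;	/* any SGI */
};

static bool irqs_outside_lrs(const struct ap_list_summary *s)
{
	return (s->nr_pend + s->nr_act) > NR_LR;
}

static unsigned int configure_hcr(const struct ap_list_summary *s)
{
	unsigned int hcr = HCR_EN;

	if (s->nr_pend > NR_LR)			/* pending IRQs spill out of the LRs */
		hcr |= HCR_NPIE;
	if (s->nr_act && irqs_outside_lrs(s))	/* active IRQs may live outside the LRs */
		hcr |= HCR_LRENPIE;
	if (irqs_outside_lrs(s))		/* MI on underflow, so we can refill */
		hcr |= HCR_UIE;

	return hcr;
}

int main(void)
{
	/* 6 pending + 1 active on 4 LRs: expect NPIE | LRENPIE | UIE */
	struct ap_list_summary als = { .nr_pend = 6, .nr_act = 1 };

	printf("hcr = %#x\n", configure_hcr(&als));
	return 0;
}

With these numbers the sketch prints hcr = 0xf (all three bits on
top of EN), which is the point of the rework: one walk of the
ap_list decides every MI source at once, instead of setting UIE
piecemeal from the middle of the LR-filling loop.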
arch/arm64/kvm/vgic/vgic-v2.c | 12 ++++-
arch/arm64/kvm/vgic/vgic-v3.c | 18 ++++++-
arch/arm64/kvm/vgic/vgic.c | 89 ++++++++++++-----------------------
arch/arm64/kvm/vgic/vgic.h | 19 +++++++-
4 files changed, 73 insertions(+), 65 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 07e93acafd04d..f53bc55288978 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -26,11 +26,19 @@ void vgic_v2_init_lrs(void)
vgic_v2_write_lr(i, 0);
}
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
- cpuif->vgic_hcr |= GICH_HCR_UIE;
+ cpuif->vgic_hcr = GICH_HCR_EN;
+
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= GICH_HCR_UIE;
}
static bool lr_signals_eoi_mi(u32 lr_val)
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 5b276e303aab2..81f1de9e3897b 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -20,11 +20,25 @@ static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
- cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return;
+
+ cpuif->vgic_hcr = ICH_HCR_EL2_En;
+
+ if (irqs_pending_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_NPIE;
+ if (irqs_active_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_LRENPIE;
+ if (irqs_outside_lrs(als))
+ cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
+
+ if (!als->nr_sgi)
+ cpuif->vgic_hcr |= ICH_HCR_EL2_vSGIEOICount;
}
static bool lr_signals_eoi_mi(u64 lr_val)
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 39346baa2677c..7e6f02d48fff1 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -791,38 +791,30 @@ static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
vgic_v3_clear_lr(vcpu, lr);
}
-static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
-{
- if (kvm_vgic_global_state.type == VGIC_V2)
- vgic_v2_set_underflow(vcpu);
- else
- vgic_v3_set_underflow(vcpu);
-}
-
-/* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
- bool *multi_sgi)
+static void summarize_ap_list(struct kvm_vcpu *vcpu,
+ struct ap_list_summary *als)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_irq *irq;
- int count = 0;
-
- *multi_sgi = false;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
+ *als = (typeof(*als)){};
+
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- int w;
+ guard(raw_spinlock)(&irq->irq_lock);
- raw_spin_lock(&irq->irq_lock);
- /* GICv2 SGIs can count for more than one... */
- w = vgic_irq_get_lr_count(irq);
- raw_spin_unlock(&irq->irq_lock);
+ if (unlikely(vgic_target_oracle(irq) != vcpu))
+ continue;
+
+ if (!irq->active)
+ als->nr_pend++;
+ else
+ als->nr_act++;
- count += w;
- *multi_sgi |= (w > 1);
+ if (irq->intid < VGIC_NR_SGIS)
+ als->nr_sgi++;
}
- return count;
}
/*
@@ -908,60 +900,39 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct ap_list_summary als;
struct vgic_irq *irq;
- int count;
- bool multi_sgi;
- u8 prio = 0xff;
- int i = 0;
+ int count = 0;
lockdep_assert_held(&vgic_cpu->ap_list_lock);
- count = compute_ap_list_depth(vcpu, &multi_sgi);
- if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
- vgic_sort_ap_list(vcpu);
+ summarize_ap_list(vcpu, &als);
- count = 0;
+ if (irqs_outside_lrs(&als))
+ vgic_sort_ap_list(vcpu);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
- raw_spin_lock(&irq->irq_lock);
-
- /*
- * If we have multi-SGIs in the pipeline, we need to
- * guarantee that they are all seen before any IRQ of
- * lower priority. In that case, we need to filter out
- * these interrupts by exiting early. This is easy as
- * the AP list has been sorted already.
- */
- if (multi_sgi && irq->priority > prio) {
- raw_spin_unlock(&irq->irq_lock);
- break;
+ scoped_guard(raw_spinlock, &irq->irq_lock) {
+ if (likely(vgic_target_oracle(irq) == vcpu)) {
+ vgic_populate_lr(vcpu, irq, count++);
+ }
}
- if (likely(vgic_target_oracle(irq) == vcpu)) {
- vgic_populate_lr(vcpu, irq, count++);
-
- if (irq->source)
- prio = irq->priority;
- }
-
- raw_spin_unlock(&irq->irq_lock);
-
- if (count == kvm_vgic_global_state.nr_lr) {
- if (!list_is_last(&irq->ap_list,
- &vgic_cpu->ap_list_head))
- vgic_set_underflow(vcpu);
+ if (count == kvm_vgic_global_state.nr_lr)
break;
- }
}
/* Nuke remaining LRs */
- for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+ for (int i = count ; i < kvm_vgic_global_state.nr_lr; i++)
vgic_clear_lr(vcpu, i);
- if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+ if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
- else
+ vgic_v2_configure_hcr(vcpu, &als);
+ } else {
vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
+ vgic_v3_configure_hcr(vcpu, &als);
+ }
}
static inline bool can_access_vgic_from_kernel(void)
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0ecadfa00397d..4a0733869cb5f 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -236,6 +236,21 @@ struct its_ite {
u32 event_id;
};
+struct ap_list_summary {
+ unsigned int nr_pend; /* purely pending, not active */
+ unsigned int nr_act; /* active, or active+pending */
+ unsigned int nr_sgi; /* any SGI */
+};
+
+#define irqs_outside_lrs(s) \
+ (((s)->nr_pend + (s)->nr_act) > kvm_vgic_global_state.nr_lr)
+
+#define irqs_pending_outside_lrs(s) \
+ ((s)->nr_pend > kvm_vgic_global_state.nr_lr)
+
+#define irqs_active_outside_lrs(s) \
+ ((s)->nr_act && irqs_outside_lrs(s))
+
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
@@ -262,7 +277,7 @@ int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
@@ -302,7 +317,7 @@ static inline void vgic_get_irq_ref(struct vgic_irq *irq)
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
-void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_configure_hcr(struct kvm_vcpu *vcpu, struct ap_list_summary *als);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
--
2.47.3