[PATCH v3 21/36] KVM: arm64: gic-v5: Check for pending PPIs
Jonathan Cameron
jonathan.cameron at huawei.com
Mon Jan 12 08:13:29 PST 2026
On Fri, 9 Jan 2026 17:04:46 +0000
Sascha Bischoff <Sascha.Bischoff at arm.com> wrote:
> This change allows KVM to check for pending PPI interrupts. This has
> two main components:
>
> First of all, the effective priority mask is calculated. This is a
> combination of the priority mask in the VPEs ICC_PCR_EL1.PRIORITY and
> the currently running priority as determined from the VPE's
> ICH_APR_EL1. If an interrupt's priority is greater than or equal to
> the effective priority mask, it can be signalled. Otherwise, it
> cannot.
>
> Secondly, any Enabled and Pending PPIs must be checked against this
> compound priority mask. This requires the PPI priorities to be synced
> back to the KVM shadow state - this is skipped in general operation as
> it isn't required and is rather expensive. If any Enabled and Pending
> PPIs are of sufficient priority to be signalled, then there are
> pending PPIs. Else, there are not. This ensures that a VPE is not
> woken when it cannot actually process the pending interrupts.
>
> Signed-off-by: Sascha Bischoff <sascha.bischoff at arm.com>
> Reviewed-by: Joey Gouly <joey.gouly at arm.com>
Trivial suggestions below. Assuming you'll tidy up the wrap, the other one
is more of an observation than something I particularly care about.
Reviewed-by: Jonathan Cameron <jonathan.cameron at huawei.com>
> ---
> arch/arm64/kvm/vgic/vgic-v5.c | 133 ++++++++++++++++++++++++++++++++++
> arch/arm64/kvm/vgic/vgic.c | 3 +
> arch/arm64/kvm/vgic/vgic.h | 1 +
> 3 files changed, 137 insertions(+)
>
> diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c
> index c1899add8f5c3..3e2a01e3008c4 100644
> --- a/arch/arm64/kvm/vgic/vgic-v5.c
> +++ b/arch/arm64/kvm/vgic/vgic-v5.c
> static bool vgic_v5_ppi_set_pending_state(struct kvm_vcpu *vcpu,
> struct vgic_irq *irq)
> {
> @@ -216,6 +239,112 @@ void vgic_v5_set_ppi_ops(struct vgic_irq *irq)
> irq->ops = &vgic_v5_ppi_irq_ops;
> }
>
> +/*
> + * Sync back the PPI priorities to the vgic_irq shadow state for any interrupts
> + * exposed to the guest (skipping all others).
> + */
> +static void vgic_v5_sync_ppi_priorities(struct kvm_vcpu *vcpu)
> +{
> + struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
> + u64 priorityr;
> +
> + /*
> + * We have 16 PPI Priority regs, but only have a few interrupts that the
> + * guest is allowed to use. Limit our sync of PPI priorities to those
> + * actually exposed to the guest by first iterating over the mask of
> + * exposed PPIs.
> + */
> + for (int mask_reg = 0; mask_reg < 2; mask_reg++) {
> + unsigned long *p;
> + int i;
> +
> + p = (unsigned long *)&vcpu->kvm->arch.vgic.gicv5_vm.vgic_ppi_mask[mask_reg];
Following is minor, but maybe worth some more thought:
Even though it's silly I'd be tempted to avoid that cast via
unsigned long bm_p = 0;
bitmap_from_arr64(&bm_p,
&vcpu->kvm->arch.vgic.gicv5_vm.vgic_ppi_mask[mask_reg],
//consider a local variable for this!
64);
for_each_set_bit(i, &bm_p, 64) {
}
which compiler should be able to collapse to a simple u64 assignment.
> +
> + for_each_set_bit(i, p, 64) {
> + struct vgic_irq *irq;
> + int pri_idx, pri_reg;
> + u32 intid;
> + u8 priority;
> +
> + pri_reg = (mask_reg * 64 + i) / 8;
> + pri_idx = (mask_reg * 64 + i) % 8;
> +
> + priorityr = cpu_if->vgic_ppi_priorityr[pri_reg];
> + priority = (priorityr >> (pri_idx * 8)) & GENMASK(4, 0);
> +
> + intid = FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_PPI);
> + intid |= FIELD_PREP(GICV5_HWIRQ_ID, mask_reg * 64 + i);
> +
> + irq = vgic_get_vcpu_irq(vcpu, intid);
> +
> + scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
> + irq->priority = priority;
> +
> + vgic_put_irq(vcpu->kvm, irq);
> + }
> + }
> +}
> +
> +bool vgic_v5_has_pending_ppi(struct kvm_vcpu *vcpu)
> +{
> + struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
> + unsigned int priority_mask;
> +
> + /* If no pending bits are set, exit early */
> + if (!cpu_if->vgic_ppi_pendr[0] && !cpu_if->vgic_ppi_pendr[1])
> + return false;
> +
> + priority_mask = vgic_v5_get_effective_priority_mask(vcpu);
> +
> + /* If the combined priority mask is 0, nothing can be signalled! */
> + if (!priority_mask)
> + return false;
> +
> + for (int reg = 0; reg < 2; reg++) {
> + const u64 enabler = cpu_if->vgic_ppi_enabler[reg];
> + const u64 pendr = cpu_if->vgic_ppi_pendr[reg];
> + unsigned long possible_bits;
> + int i;
> +
> + /* Check all interrupts that are enabled and pending */
> + possible_bits = enabler & pendr;
> +
> + /*
> + * Optimisation: pending and enabled with no active priorities
> + */
> + if (possible_bits && priority_mask == 32)
> + return true;
> +
> + for_each_set_bit(i, &possible_bits, 64) {
> + bool has_pending = false;
> + struct vgic_irq *irq;
> + u32 intid;
> +
> + intid = FIELD_PREP(GICV5_HWIRQ_TYPE, GICV5_HWIRQ_TYPE_PPI);
> + intid |= FIELD_PREP(GICV5_HWIRQ_ID, reg * 64 + i);
> +
> + irq = vgic_get_vcpu_irq(vcpu, intid);
> +
> + scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
> +				/*
> +				 * We know that the interrupt is
short wrap. This could be
				/*
				 * We know that the interrupt is enabled and
				 * pending, so only check the priority.
				 */
> +				 * enabled and pending, so only check
> +				 * the priority.
> +				 */
> +				if (irq->priority <= priority_mask)
> +					has_pending = true;
> +			}
> +
> + vgic_put_irq(vcpu->kvm, irq);
> +
> + if (has_pending)
> + return true;
> + }
> + }
> +
> + return false;
> +}
> +
More information about the linux-arm-kernel
mailing list