[PATCH v7 13/20] KVM: arm64: Apply dynamic guest counter reservations
Colton Lewis
coltonlewis at google.com
Mon May 4 14:18:06 PDT 2026
Apply dynamic guest counter reservations by checking whether the
requested guest mask collides with any events the host has scheduled
and, if so, calling perf_pmu_resched_update() with a hook that updates
the mask of available counters between schedule out and schedule in.
Signed-off-by: Colton Lewis <coltonlewis at google.com>
---
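Note for reviewers: the ordering guarantee this patch depends on is
easiest to see as a small standalone sketch. The callback signature
matches the perf_pmu_resched_update() call below; struct pmu and the
model_sched_out()/model_sched_in() helpers are illustrative stand-ins,
not kernel API.

	/* Illustrative model only; not kernel code. */
	struct pmu { const char *name; };

	/* Stand-ins for perf context schedule out/in. */
	static void model_sched_out(struct pmu *pmu) { /* stop all events */ }
	static void model_sched_in(struct pmu *pmu) { /* restart what still fits */ }

	/* Shape of the reschedule helper introduced earlier in this series. */
	static void model_resched_update(struct pmu *pmu,
					 void (*update)(struct pmu *pmu, void *data),
					 void *data)
	{
		model_sched_out(pmu);	/* no events are live past this point */
		update(pmu, data);	/* mask is mutated while counters are idle */
		model_sched_in(pmu);	/* events re-placed against the new mask */
	}

The mask is only ever swapped while nothing is scheduled, so no host
event sees its counter reprogrammed underneath it; events that no
longer fit are the ones flagged by the pinned-event warning below.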
arch/arm64/kvm/pmu-direct.c | 69 ++++++++++++++++++++++++++++++++++++
include/linux/perf/arm_pmu.h | 1 +
2 files changed, 70 insertions(+)
diff --git a/arch/arm64/kvm/pmu-direct.c b/arch/arm64/kvm/pmu-direct.c
index 2252d3b905db9..14cc419dbafad 100644
--- a/arch/arm64/kvm/pmu-direct.c
+++ b/arch/arm64/kvm/pmu-direct.c
@@ -100,6 +100,73 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
 	return *host_data_ptr(nr_event_counters);
 }
 
+/* Callback to update the counter mask between perf schedule out and in */
+static void kvm_pmu_update_mask(struct pmu *pmu, void *data)
+{
+	struct arm_pmu *arm_pmu = to_arm_pmu(pmu);
+	unsigned long *new_mask = data;
+
+	bitmap_copy(arm_pmu->cntr_mask, new_mask, ARMPMU_MAX_HWEVENTS);
+}
+
+/**
+ * kvm_pmu_set_guest_counters() - Handle dynamic counter reservations
+ * @cpu_pmu: struct arm_pmu to potentially modify
+ * @guest_mask: new guest mask for the pmu
+ *
+ * Check if guest counters will interfere with current host events and
+ * call into perf_pmu_resched_update() if a reschedule is required.
+ */
+static void kvm_pmu_set_guest_counters(struct arm_pmu *cpu_pmu, u64 guest_mask)
+{
+	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+	DECLARE_BITMAP(guest_bitmap, ARMPMU_MAX_HWEVENTS);
+	DECLARE_BITMAP(new_mask, ARMPMU_MAX_HWEVENTS);
+	bool need_resched = false;
+
+	bitmap_from_arr64(guest_bitmap, &guest_mask, ARMPMU_MAX_HWEVENTS);
+	bitmap_copy(new_mask, cpu_pmu->hw_cntr_mask, ARMPMU_MAX_HWEVENTS);
+
+	if (guest_mask) {
+		/* Subtract guest counters from the available host mask */
+		bitmap_andnot(new_mask, new_mask, guest_bitmap, ARMPMU_MAX_HWEVENTS);
+
+		/* Did we collide with an active host event? */
+		if (bitmap_intersects(cpuc->used_mask, guest_bitmap, ARMPMU_MAX_HWEVENTS)) {
+			int idx;
+
+			need_resched = true;
+			cpuc->host_squeezed = true;
+
+			/* Look for pinned events that are about to be preempted */
+			for_each_set_bit(idx, guest_bitmap, ARMPMU_MAX_HWEVENTS) {
+				if (test_bit(idx, cpuc->used_mask) && cpuc->events[idx] &&
+				    cpuc->events[idx]->attr.pinned) {
+					pr_warn_ratelimited("perf: Pinned host event squeezed out by KVM guest PMU partition\n");
+					break;
+				}
+			}
+		}
+	} else {
+		/*
+		 * Restoring to hw_cntr_mask.
+		 * Only resched if we previously squeezed an event.
+		 */
+		if (cpuc->host_squeezed) {
+			need_resched = true;
+			cpuc->host_squeezed = false;
+		}
+	}
+
+	if (need_resched) {
+		/* Collision: run a full perf reschedule */
+		perf_pmu_resched_update(&cpu_pmu->pmu, kvm_pmu_update_mask, new_mask);
+	} else {
+		/* No active host event was displaced; update the mask directly */
+		bitmap_copy(cpu_pmu->cntr_mask, new_mask, ARMPMU_MAX_HWEVENTS);
+	}
+}
+
 /**
  * kvm_pmu_host_counter_mask() - Compute bitmask of host-reserved counters
  * @pmu: Pointer to arm_pmu struct
@@ -218,6 +285,7 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
 	pmu = vcpu->kvm->arch.arm_pmu;
 	guest_counters = kvm_pmu_guest_counter_mask(pmu);
 
+	kvm_pmu_set_guest_counters(pmu, guest_counters);
 	kvm_pmu_apply_event_filter(vcpu);
 
 	for_each_set_bit(i, &guest_counters, ARMPMU_MAX_HWEVENTS) {
@@ -319,5 +387,6 @@ void kvm_pmu_put(struct kvm_vcpu *vcpu)
 	val = read_sysreg(pmintenset_el1);
 	__vcpu_assign_sys_reg(vcpu, PMINTENSET_EL1, val & mask);
 
+	kvm_pmu_set_guest_counters(pmu, 0);
 	preempt_enable();
 }
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index f7b000bb3eca8..63f88fec5e80f 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -75,6 +75,7 @@ struct pmu_hw_events {
 
 	/* Active events requesting branch records */
 	unsigned int		branch_users;
+	bool			host_squeezed;
 };
 
 enum armpmu_attr_groups {
--
2.54.0.545.g6539524ca2-goog