[kvm-unit-tests PATCH 07/16] x86/pmu: Rename pmu_gp_counter_is_available() to pmu_arch_event_is_available()

Sean Christopherson seanjc at google.com
Thu May 29 15:19:20 PDT 2025


Rename pmu_gp_counter_is_available() to pmu_arch_event_is_available() to
reflect what the field and helper actually track.  The availability of
architectural events has nothing to do with the GP counters themselves.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc at google.com>
---
 lib/x86/pmu.c | 4 ++--
 lib/x86/pmu.h | 6 +++---
 x86/pmu.c     | 6 +++---
 3 files changed, 8 insertions(+), 8 deletions(-)
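
Reviewer note (not part of the patch): the standalone sketch below models how
the renamed mask is built and consumed, mirroring the pmu_init() and
pmu_arch_event_is_available() logic touched by this diff.  The self-contained
types, the hard-coded CPUID.0xA.EBX value, and main() are illustrative
assumptions, not code from the tree.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT(n)	(1u << (n))

	/* Mirrors the renamed field: one bit per architectural event. */
	static uint32_t arch_event_available;

	/*
	 * CPUID.0xA.EBX sets a bit when an architectural event is NOT
	 * available, so the mask is inverted once at init time.
	 */
	static void pmu_init_sketch(uint32_t cpuid_0xa_ebx)
	{
		arch_event_available = ~cpuid_0xa_ebx;
	}

	static bool pmu_arch_event_is_available(int i)
	{
		return arch_event_available & BIT(i);
	}

	int main(void)
	{
		/* Hypothetical CPU that lacks event 2 (reference cycles). */
		pmu_init_sketch(BIT(2));

		printf("event 0 available: %d\n", pmu_arch_event_is_available(0));
		printf("event 2 available: %d\n", pmu_arch_event_is_available(2));
		return 0;
	}
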

diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
index d06e9455..599168ac 100644
--- a/lib/x86/pmu.c
+++ b/lib/x86/pmu.c
@@ -21,7 +21,7 @@ void pmu_init(void)
 		pmu.gp_counter_mask_length = (cpuid_10.a >> 24) & 0xff;
 
 		/* CPUID.0xA.EBX bit is '1' if a counter is NOT available. */
-		pmu.gp_counter_available = ~cpuid_10.b;
+		pmu.arch_event_available = ~cpuid_10.b;
 
 		if (this_cpu_has(X86_FEATURE_PDCM))
 			pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
@@ -51,7 +51,7 @@ void pmu_init(void)
 		}
 		pmu.gp_counter_width = PMC_DEFAULT_WIDTH;
 		pmu.gp_counter_mask_length = pmu.nr_gp_counters;
-		pmu.gp_counter_available = (1u << pmu.nr_gp_counters) - 1;
+		pmu.arch_event_available = (1u << pmu.nr_gp_counters) - 1;
 
 		if (this_cpu_has_perf_global_status()) {
 			pmu.msr_global_status = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
index f07fbd93..d0ad280a 100644
--- a/lib/x86/pmu.h
+++ b/lib/x86/pmu.h
@@ -64,7 +64,7 @@ struct pmu_caps {
 	u8 nr_gp_counters;
 	u8 gp_counter_width;
 	u8 gp_counter_mask_length;
-	u32 gp_counter_available;
+	u32 arch_event_available;
 	u32 msr_gp_counter_base;
 	u32 msr_gp_event_select_base;
 
@@ -110,9 +110,9 @@ static inline bool this_cpu_has_perf_global_status(void)
 	return pmu.version > 1;
 }
 
-static inline bool pmu_gp_counter_is_available(int i)
+static inline bool pmu_arch_event_is_available(int i)
 {
-	return pmu.gp_counter_available & BIT(i);
+	return pmu.arch_event_available & BIT(i);
 }
 
 static inline u64 pmu_lbr_version(void)
diff --git a/x86/pmu.c b/x86/pmu.c
index 8cf26b12..0ce34433 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -436,7 +436,7 @@ static void check_gp_counters(void)
 	int i;
 
 	for (i = 0; i < gp_events_size; i++)
-		if (pmu_gp_counter_is_available(i))
+		if (pmu_arch_event_is_available(i))
 			check_gp_counter(&gp_events[i]);
 		else
 			printf("GP event '%s' is disabled\n",
@@ -463,7 +463,7 @@ static void check_counters_many(void)
 	int i, n;
 
 	for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
-		if (!pmu_gp_counter_is_available(i))
+		if (!pmu_arch_event_is_available(i))
 			continue;
 
 		cnt[n].ctr = MSR_GP_COUNTERx(n);
@@ -902,7 +902,7 @@ static void set_ref_cycle_expectations(void)
 	uint64_t t0, t1, t2, t3;
 
 	/* Bit 2 enumerates the availability of reference cycles events. */
-	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
+	if (!pmu.nr_gp_counters || !pmu_arch_event_is_available(2))
 		return;
 
 	if (this_cpu_has_perf_global_ctrl())
-- 
2.49.0.1204.g71687c7c1d-goog
