[kvm-unit-tests PATCH 07/16] x86/pmu: Rename pmu_gp_counter_is_available() to pmu_arch_event_is_available()

Mi, Dapeng dapeng1.mi at linux.intel.com
Tue Jun 10 00:09:13 PDT 2025


On 5/30/2025 6:19 AM, Sean Christopherson wrote:
> Rename pmu_gp_counter_is_available() to pmu_arch_event_is_available() to
> reflect what the field and helper actually track.  The availability of
> architectural events has nothing to do with the GP counters themselves.
>
> No functional change intended.
>
> Signed-off-by: Sean Christopherson <seanjc at google.com>
> ---
>  lib/x86/pmu.c | 4 ++--
>  lib/x86/pmu.h | 6 +++---
>  x86/pmu.c     | 6 +++---
>  3 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
> index d06e9455..599168ac 100644
> --- a/lib/x86/pmu.c
> +++ b/lib/x86/pmu.c
> @@ -21,7 +21,7 @@ void pmu_init(void)
>  		pmu.gp_counter_mask_length = (cpuid_10.a >> 24) & 0xff;
>  
>  		/* CPUID.0xA.EBX bit is '1' if a counter is NOT available. */

We need to update this comment as well, since the CPUID.0xA.EBX bits enumerate architectural events, not counters.
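
Maybe something like this (wording is just a suggestion):

		/* CPUID.0xA.EBX bit is '1' if an arch event is NOT available. */
		pmu.arch_event_available = ~cpuid_10.b;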


> -		pmu.gp_counter_available = ~cpuid_10.b;
> +		pmu.arch_event_available = ~cpuid_10.b;
>  
>  		if (this_cpu_has(X86_FEATURE_PDCM))
>  			pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
> @@ -51,7 +51,7 @@ void pmu_init(void)
>  		}
>  		pmu.gp_counter_width = PMC_DEFAULT_WIDTH;
>  		pmu.gp_counter_mask_length = pmu.nr_gp_counters;
> -		pmu.gp_counter_available = (1u << pmu.nr_gp_counters) - 1;
> +		pmu.arch_event_available = (1u << pmu.nr_gp_counters) - 1;

"available architectural events" and "available GP counters" are two
different things. I know this would be changed in later patch 09/16, but
it's really confusing. Could we merge the later patch 09/16 into this patch?


>  
>  		if (this_cpu_has_perf_global_status()) {
>  			pmu.msr_global_status = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
> diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
> index f07fbd93..d0ad280a 100644
> --- a/lib/x86/pmu.h
> +++ b/lib/x86/pmu.h
> @@ -64,7 +64,7 @@ struct pmu_caps {
>  	u8 nr_gp_counters;
>  	u8 gp_counter_width;
>  	u8 gp_counter_mask_length;
> -	u32 gp_counter_available;
> +	u32 arch_event_available;
>  	u32 msr_gp_counter_base;
>  	u32 msr_gp_event_select_base;
>  
> @@ -110,9 +110,9 @@ static inline bool this_cpu_has_perf_global_status(void)
>  	return pmu.version > 1;
>  }
>  
> -static inline bool pmu_gp_counter_is_available(int i)
> +static inline bool pmu_arch_event_is_available(int i)
>  {
> -	return pmu.gp_counter_available & BIT(i);
> +	return pmu.arch_event_available & BIT(i);
>  }
>  
>  static inline u64 pmu_lbr_version(void)
> diff --git a/x86/pmu.c b/x86/pmu.c
> index 8cf26b12..0ce34433 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -436,7 +436,7 @@ static void check_gp_counters(void)
>  	int i;
>  
>  	for (i = 0; i < gp_events_size; i++)
> -		if (pmu_gp_counter_is_available(i))
> +		if (pmu_arch_event_is_available(i))
>  			check_gp_counter(&gp_events[i]);
>  		else
>  			printf("GP event '%s' is disabled\n",
> @@ -463,7 +463,7 @@ static void check_counters_many(void)
>  	int i, n;
>  
>  	for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
> -		if (!pmu_gp_counter_is_available(i))
> +		if (!pmu_arch_event_is_available(i))
>  			continue;

The intent of check_counters_many() is to verify that all available GP and
fixed counters can count correctly at the same time. So when the event at a
given index is unavailable, we should select another available event for
that counter instead of skipping the counter entirely.

Maybe like this:

diff --git a/x86/pmu.c b/x86/pmu.c
index 63eae3db..013fdfce 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -457,18 +457,34 @@ static void check_fixed_counters(void)
        }
 }

+static struct pmu_event *get_one_event(int idx)
+{
+       int i;
+
+       if (pmu_arch_event_is_available(idx))
+               return &gp_events[idx % gp_events_size];
+
+       for (i = 0; i < gp_events_size; i++) {
+               if (pmu_arch_event_is_available(i))
+                       return &gp_events[i];
+       }
+
+       return NULL;
+}
+
 static void check_counters_many(void)
 {
+       struct pmu_event *evt;
        pmu_counter_t cnt[48];
        int i, n;

        for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
-               if (!pmu_arch_event_is_available(i))
+               evt = get_one_event(i);
+               if (!evt)
                        continue;

                cnt[n].ctr = MSR_GP_COUNTERx(n);
-               cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
-                       gp_events[i % gp_events_size].unit_sel;
+               cnt[n].config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel;
                n++;
        }
        for (i = 0; i < fixed_counters_num; i++) {
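
This way a counter whose indexed event isn't available falls back to the
first available event, so every GP counter is still exercised; a counter is
skipped only if no architectural event is available at all.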


>  
>  		cnt[n].ctr = MSR_GP_COUNTERx(n);
> @@ -902,7 +902,7 @@ static void set_ref_cycle_expectations(void)
>  	uint64_t t0, t1, t2, t3;
>  
>  	/* Bit 2 enumerates the availability of reference cycles events. */
> -	if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
> +	if (!pmu.nr_gp_counters || !pmu_arch_event_is_available(2))
>  		return;
>  
>  	if (this_cpu_has_perf_global_ctrl())


