[PATCH v3 4/9] drivers/perf: riscv: Implement PMU event info function

Anup Patel anup at brainfault.org
Thu Jul 17 21:32:53 PDT 2025


On Fri, May 23, 2025 at 12:33 AM Atish Patra <atishp at rivosinc.com> wrote:
>
> With the new SBI PMU event info function, we can query the availability
> of all the standard SBI PMU events at boot time with a single ecall.
> This improves the boot time by avoiding making an SBI call for each
> standard PMU event. Since this function is defined only in SBI v3.0,
> invoke this only if the underlying SBI implementation is v3.0 or higher.
>
> Signed-off-by: Atish Patra <atishp at rivosinc.com>
> ---
>  arch/riscv/include/asm/sbi.h |  9 ++++++
>  drivers/perf/riscv_pmu_sbi.c | 68 ++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 77 insertions(+)
>
> diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
> index 6ce385a3a7bb..77b6997eb6c5 100644
> --- a/arch/riscv/include/asm/sbi.h
> +++ b/arch/riscv/include/asm/sbi.h
> @@ -135,6 +135,7 @@ enum sbi_ext_pmu_fid {
>         SBI_EXT_PMU_COUNTER_FW_READ,
>         SBI_EXT_PMU_COUNTER_FW_READ_HI,
>         SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
> +       SBI_EXT_PMU_EVENT_GET_INFO,
>  };
>
>  union sbi_pmu_ctr_info {
> @@ -158,6 +159,14 @@ struct riscv_pmu_snapshot_data {
>         u64 reserved[447];
>  };
>
> +struct riscv_pmu_event_info {
> +       u32 event_idx;
> +       u32 output;
> +       u64 event_data;
> +};
> +
> +#define RISCV_PMU_EVENT_INFO_OUTPUT_MASK 0x01
> +
>  #define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
>  #define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
>  /* SBI v3.0 allows extended hpmeventX width value */
> diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
> index 273ed70098a3..33d8348bf68a 100644
> --- a/drivers/perf/riscv_pmu_sbi.c
> +++ b/drivers/perf/riscv_pmu_sbi.c
> @@ -299,6 +299,66 @@ static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
>         },
>  };
>
> +static int pmu_sbi_check_event_info(void)
> +{
> +       int num_events = ARRAY_SIZE(pmu_hw_event_map) + PERF_COUNT_HW_CACHE_MAX *
> +                        PERF_COUNT_HW_CACHE_OP_MAX * PERF_COUNT_HW_CACHE_RESULT_MAX;
> +       struct riscv_pmu_event_info *event_info_shmem;
> +       phys_addr_t base_addr;
> +       int i, j, k, result = 0, count = 0;
> +       struct sbiret ret;
> +
> +       event_info_shmem = kcalloc(num_events, sizeof(*event_info_shmem), GFP_KERNEL);
> +       if (!event_info_shmem)
> +               return -ENOMEM;
> +
> +       for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
> +               event_info_shmem[count++].event_idx = pmu_hw_event_map[i].event_idx;
> +
> +       for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
> +               for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
> +                       for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
> +                               event_info_shmem[count++].event_idx =
> +                                                       pmu_cache_event_map[i][j][k].event_idx;
> +               }
> +       }
> +
> +       base_addr = __pa(event_info_shmem);
> +       if (IS_ENABLED(CONFIG_32BIT))
> +               ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, lower_32_bits(base_addr),
> +                               upper_32_bits(base_addr), count, 0, 0, 0);
> +       else
> +               ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, base_addr, 0,
> +                               count, 0, 0, 0);
> +       if (ret.error) {
> +               result = -EOPNOTSUPP;
> +               goto free_mem;
> +       }
> +
> +       for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) {
> +               if (!(event_info_shmem[i].output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
> +                       pmu_hw_event_map[i].event_idx = -ENOENT;
> +       }
> +
> +       count = ARRAY_SIZE(pmu_hw_event_map);
> +
> +       for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
> +               for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
> +                       for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) {
> +                               if (!(event_info_shmem[count].output &
> +                                     RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
> +                                       pmu_cache_event_map[i][j][k].event_idx = -ENOENT;
> +                               count++;
> +                       }
> +               }
> +       }
> +
> +free_mem:
> +       kfree(event_info_shmem);
> +
> +       return result;
> +}
> +
>  static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
>  {
>         struct sbiret ret;
> @@ -316,6 +376,14 @@ static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
>
>  static void pmu_sbi_check_std_events(struct work_struct *work)
>  {
> +       int ret;
> +
> +       if (sbi_v3_available) {
> +               ret = pmu_sbi_check_event_info();
> +               if (!ret)
> +                       return;

We should not fall back to the old way if ret != 0. Rather,
the pmu_sbi_check_std_events() should return failure if
pmu_sbi_check_event_info() fails.

> +       }
> +
>         for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
>                 pmu_sbi_check_event(&pmu_hw_event_map[i]);
>
>
> --
> 2.43.0
>

Regards,
Anup



More information about the linux-riscv mailing list