[PATCH 1/1] lib: pmu: allow to use the highest available counter

Atish Patra atishp at atishpatra.org
Thu Jun 23 10:59:30 PDT 2022


On Thu, Jun 23, 2022 at 8:31 AM Anup Patel <anup at brainfault.org> wrote:
>
> On Thu, Jun 23, 2022 at 7:43 PM Sergey Matyukevich <geomatsi at gmail.com> wrote:
> >
> > Both OpenSBI and the OS explicitly assume that there is no hardware
> > counter with index 1: hardware uses that bit for TM control. So OpenSBI
> > filters out that index in its sanity checks. However, the range sanity
> > checks do not treat that index in a special way. As a result, OpenSBI
> > does not allow use of the firmware counter with the highest index. This
> > change modifies the range checks to allow access to the highest-index
> > firmware counter.
> >
> > The simple test is to make sure that there is no counter multiplexing
> > in the following command:
> >
> > $ perf stat -e \
> >         r8000000000000000,r8000000000000001,r8000000000000002,r8000000000000003, \
> >         r8000000000000004,r8000000000000005,r8000000000000006,r8000000000000007, \
> >         r8000000000000008,r8000000000000009,r800000000000000a,r800000000000000b, \
> >         r800000000000000c,r800000000000000d,r800000000000000e,r800000000000000f  \
> >         ls
> >
> > Signed-off-by: Sergey Matyukevich <geomatsi at gmail.com>
> >
> > ---
> >
> > Note that the perf RISC-V SBI Linux driver is adapted to the current
> > OpenSBI behavior: it passes a counter mask with the highest valid index
> > excluded. So an accompanying fix is also required for Linux; see the
> > patches posted to the riscv mailing list:
> >
> > https://lore.kernel.org/linux-riscv/20220623112735.357093-1-geomatsi@gmail.com/T/#m4811c83cf767cdaaeb31e2530d4a51034fcf6c3c
> >
> >  lib/sbi/sbi_pmu.c | 18 +++++++++---------
> >  1 file changed, 9 insertions(+), 9 deletions(-)
> >
> > diff --git a/lib/sbi/sbi_pmu.c b/lib/sbi/sbi_pmu.c
> > index 3ecf536..b386d33 100644
> > --- a/lib/sbi/sbi_pmu.c
> > +++ b/lib/sbi/sbi_pmu.c
> > @@ -115,7 +115,7 @@ static int pmu_ctr_validate(uint32_t cidx, uint32_t *event_idx_code)
> >
> >         event_idx_val = active_events[hartid][cidx];
> >
> > -       if (cidx >= total_ctrs || (event_idx_val == SBI_PMU_EVENT_IDX_INVALID))
> > +       if (cidx > total_ctrs || (event_idx_val == SBI_PMU_EVENT_IDX_INVALID))
>
> Instead of changing all comparisons of total_ctrs, why not set
> "num_hw_ctrs = sbi_hart_mhpm_count(scratch) + 3;" in sbi_pmu_init() ?
>

Yes. That is simpler. We need a few other fixes along with that as
well. Here is the diff:

diff --git a/lib/sbi/sbi_pmu.c b/lib/sbi/sbi_pmu.c
index 3ecf536002a4..a532746c74be 100644
--- a/lib/sbi/sbi_pmu.c
+++ b/lib/sbi/sbi_pmu.c
@@ -594,7 +594,7 @@ static int pmu_ctr_find_fw(unsigned long cbase, unsigned long cmask, u32 hartid)
        unsigned long ctr_mask = cmask << cbase;

        if (cbase <= num_hw_ctrs)
-               fw_base = num_hw_ctrs + 1;
+               fw_base = num_hw_ctrs;
        else
                fw_base = cbase;

@@ -694,7 +694,7 @@ int sbi_pmu_ctr_get_info(uint32_t cidx, unsigned long *ctr_info)
                return SBI_EINVAL;

        /* We have 31 HW counters with 31 being the last index(MHPMCOUNTER31) */
-       if (cidx <= num_hw_ctrs) {
+       if (cidx < num_hw_ctrs) {
                cinfo.type = SBI_PMU_CTR_TYPE_HW;
                cinfo.csr = CSR_CYCLE + cidx;
                /* mcycle & minstret are always 64 bit */
@@ -749,7 +749,7 @@ int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
                sbi_platform_pmu_init(plat);

                /* mcycle & minstret is available always */
-               num_hw_ctrs = sbi_hart_mhpm_count(scratch) + 2;
+               num_hw_ctrs = sbi_hart_mhpm_count(scratch) + 3;
                total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
        }

As the counters are zero-indexed, the firmware counters should start
from num_hw_ctrs.
E.g., if the hardware has 8 programmable counters, num_hw_ctrs will be
set to 11 (counter indices 0-10), and the firmware counters will range
from 11 to 26.
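
For illustration only (not part of the patch), here is a minimal sketch of
the resulting index layout, assuming SBI_PMU_FW_CTR_MAX == 16 and the example
above with 8 programmable counters:

/* index_layout.c: illustrative sketch of the counter index layout after
 * the proposed change (num_hw_ctrs = mhpm_count + 3). The values below
 * are example assumptions, not taken from a specific platform.
 */
#include <stdio.h>

int main(void)
{
        unsigned int mhpm_count = 8;      /* programmable hpmcounters */
        unsigned int fw_ctr_max = 16;     /* SBI_PMU_FW_CTR_MAX */
        /* cycle + time hole (index 1) + instret + mhpm counters */
        unsigned int num_hw_ctrs = mhpm_count + 3;
        unsigned int total_ctrs = num_hw_ctrs + fw_ctr_max;

        printf("hw counter indices: 0..%u\n", num_hw_ctrs - 1);  /* 0..10  */
        printf("fw counter indices: %u..%u\n",
               num_hw_ctrs, total_ctrs - 1);                     /* 11..26 */
        return 0;
}

With this layout the existing "cidx >= total_ctrs" style checks remain valid
and no longer cut off the last firmware counter.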

> Regards,
> Anup
>
> >                 return SBI_EINVAL;
> >
> >         event_idx_type = get_cidx_type(event_idx_val);
> > @@ -347,13 +347,13 @@ int sbi_pmu_ctr_start(unsigned long cbase, unsigned long cmask,
> >         int ret = SBI_EINVAL;
> >         bool bUpdate = FALSE;
> >
> > -       if (sbi_fls(ctr_mask) >= total_ctrs)
> > +       if (sbi_fls(ctr_mask) > total_ctrs)
> >                 return ret;
> >
> >         if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
> >                 bUpdate = TRUE;
> >
> > -       for_each_set_bit_from(cbase, &ctr_mask, total_ctrs) {
> > +       for_each_set_bit_from(cbase, &ctr_mask, total_ctrs + 1) {
> >                 event_idx_type = pmu_ctr_validate(cbase, &event_code);
> >                 if (event_idx_type < 0)
> >                         /* Continue the start operation for other counters */
> > @@ -423,10 +423,10 @@ int sbi_pmu_ctr_stop(unsigned long cbase, unsigned long cmask,
> >         uint32_t event_code;
> >         unsigned long ctr_mask = cmask << cbase;
> >
> > -       if (sbi_fls(ctr_mask) >= total_ctrs)
> > +       if (sbi_fls(ctr_mask) > total_ctrs)
> >                 return SBI_EINVAL;
> >
> > -       for_each_set_bit_from(cbase, &ctr_mask, total_ctrs) {
> > +       for_each_set_bit_from(cbase, &ctr_mask, total_ctrs + 1) {
> >                 event_idx_type = pmu_ctr_validate(cbase, &event_code);
> >                 if (event_idx_type < 0)
> >                         /* Continue the stop operation for other counters */
> > @@ -598,7 +598,7 @@ static int pmu_ctr_find_fw(unsigned long cbase, unsigned long cmask, u32 hartid)
> >         else
> >                 fw_base = cbase;
> >
> > -       for (i = fw_base; i < total_ctrs; i++)
> > +       for (i = fw_base; i <= total_ctrs; i++)
> >                 if ((active_events[hartid][i] == SBI_PMU_EVENT_IDX_INVALID) &&
> >                     ((1UL << i) & ctr_mask))
> >                         return i;
> > @@ -618,7 +618,7 @@ int sbi_pmu_ctr_cfg_match(unsigned long cidx_base, unsigned long cidx_mask,
> >         unsigned long tmp = cidx_mask << cidx_base;
> >
> >         /* Do a basic sanity check of counter base & mask */
> > -       if (sbi_fls(tmp) >= total_ctrs || event_type >= SBI_PMU_EVENT_TYPE_MAX)
> > +       if (sbi_fls(tmp) > total_ctrs || event_type >= SBI_PMU_EVENT_TYPE_MAX)
> >                 return SBI_EINVAL;
> >
> >         if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
> > @@ -690,7 +690,7 @@ int sbi_pmu_ctr_get_info(uint32_t cidx, unsigned long *ctr_info)
> >         struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
> >
> >         /* Sanity check. Counter1 is not mapped at all */
> > -       if (cidx >= total_ctrs || cidx == 1)
> > +       if (cidx > total_ctrs || cidx == 1)
> >                 return SBI_EINVAL;
> >
> >         /* We have 31 HW counters with 31 being the last index(MHPMCOUNTER31) */
> > @@ -719,7 +719,7 @@ static void pmu_reset_event_map(u32 hartid)
> >         int j;
> >
> >         /* Initialize the counter to event mapping table */
> > -       for (j = 3; j < total_ctrs; j++)
> > +       for (j = 3; j <= total_ctrs; j++)
> >                 active_events[hartid][j] = SBI_PMU_EVENT_IDX_INVALID;
> >         for (j = 0; j < SBI_PMU_FW_CTR_MAX; j++)
> >                 sbi_memset(&fw_event_map[hartid][j], 0,
> > --
> > 2.36.1
> >
> >



-- 
Regards,
Atish


