[PATCH 3/5] lib: sbi: Implement hart protection for PMP and ePMP

Anup Patel apatel at ventanamicro.com
Tue Dec 9 04:55:27 PST 2025


On Sun, Dec 7, 2025 at 4:42 PM Samuel Holland <samuel.holland at sifive.com> wrote:
>
> Hi Anup,
>
> On 2025-11-26 8:18 AM, Anup Patel wrote:
> > Implement a PMP and ePMP based hart protection abstraction so
> > that uses of the sbi_hart_pmp_xyz() functions can be replaced
> > with the sbi_hart_protection_xyz() functions.
> >
> > Signed-off-by: Anup Patel <apatel at ventanamicro.com>
> > ---
> >  lib/sbi/sbi_hart.c | 209 ++++++++++++++++++++++++++++-----------------
> >  1 file changed, 133 insertions(+), 76 deletions(-)
> >
> > diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
> > index c5a8d248..8869839d 100644
> > --- a/lib/sbi/sbi_hart.c
> > +++ b/lib/sbi/sbi_hart.c
> > @@ -17,6 +17,7 @@
> >  #include <sbi/sbi_csr_detect.h>
> >  #include <sbi/sbi_error.h>
> >  #include <sbi/sbi_hart.h>
> > +#include <sbi/sbi_hart_protection.h>
> >  #include <sbi/sbi_math.h>
> >  #include <sbi/sbi_platform.h>
> >  #include <sbi/sbi_pmu.h>
> > @@ -311,6 +312,30 @@ bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx)
> >       return bitmap_test(fw_smepmp_ids, pmp_idx) ? true : false;
> >  }
> >
> > +static void sbi_hart_pmp_fence(void)
> > +{
> > +     /*
> > +      * As per section 3.7.2 of privileged specification v1.12,
> > +      * virtual address translations can be speculatively performed
> > +      * (even before actual access). These, along with PMP translations,
> > +      * can be cached. This can pose a problem with CPU hotplug
> > +      * and non-retentive suspend scenarios because PMP states are
> > +      * not preserved.
> > +      * It is advisable to flush the caching structures under such
> > +      * conditions.
> > +      */
> > +     if (misa_extension('S')) {
> > +             __asm__ __volatile__("sfence.vma");
> > +
> > +             /*
> > +              * If hypervisor mode is supported, flush caching
> > +              * structures in guest mode too.
> > +              */
> > +             if (misa_extension('H'))
> > +                     __sbi_hfence_gvma_all();
> > +     }
> > +}
> > +
> >  static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
> >                               struct sbi_domain *dom,
> >                               struct sbi_domain_memregion *reg,
> > @@ -343,14 +368,19 @@ static bool is_valid_pmp_idx(unsigned int pmp_count, unsigned int pmp_idx)
> >       return false;
> >  }
> >
> > -static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
> > -                                  unsigned int pmp_count,
> > -                                  unsigned int pmp_log2gran,
> > -                                  unsigned long pmp_addr_max)
> > +static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch)
> >  {
> >       struct sbi_domain_memregion *reg;
> >       struct sbi_domain *dom = sbi_domain_thishart_ptr();
> > -     unsigned int pmp_idx, pmp_flags;
> > +     unsigned int pmp_log2gran, pmp_bits;
> > +     unsigned int pmp_idx, pmp_count;
> > +     unsigned long pmp_addr_max;
> > +     unsigned int pmp_flags;
> > +
> > +     pmp_count = sbi_hart_pmp_count(scratch);
> > +     pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
> > +     pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
> > +     pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
> >
> >       /*
> >        * Set the RLB so that, we can write to PMP entries without
> > @@ -430,20 +460,64 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
> >        * Keep the RLB bit so that dynamic mappings can be done.
> >        */
> >
> > +     sbi_hart_pmp_fence();
> >       return 0;
> >  }
> >
> > -static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
> > -                                  unsigned int pmp_count,
> > -                                  unsigned int pmp_log2gran,
> > -                                  unsigned long pmp_addr_max)
> > +static int sbi_hart_smepmp_map_range(struct sbi_scratch *scratch,
> > +                                  unsigned long addr, unsigned long size)
> > +{
> > +     /* shared R/W access for M and S/U mode */
> > +     unsigned int pmp_flags = (PMP_W | PMP_X);
> > +     unsigned long order, base = 0;
> > +
> > +     if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
> > +             return SBI_ENOSPC;
> > +
> > +     for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
> > +          order <= __riscv_xlen; order++) {
> > +             if (order < __riscv_xlen) {
> > +                     base = addr & ~((1UL << order) - 1UL);
> > +                     if ((base <= addr) &&
> > +                         (addr < (base + (1UL << order))) &&
> > +                         (base <= (addr + size - 1UL)) &&
> > +                         ((addr + size - 1UL) < (base + (1UL << order))))
> > +                             break;
> > +             } else {
> > +                     return SBI_EFAIL;
> > +             }
> > +     }
> > +
> > +     sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
> > +                          SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
> > +                          pmp_flags, base, order);
> > +     pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
> > +
> > +     return SBI_OK;
> > +}
> > +
> > +static int sbi_hart_smepmp_unmap_range(struct sbi_scratch *scratch,
> > +                                    unsigned long addr, unsigned long size)
> > +{
> > +     sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
> > +     return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
> > +}
> > +
> > +static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch)
> >  {
> >       struct sbi_domain_memregion *reg;
> >       struct sbi_domain *dom = sbi_domain_thishart_ptr();
> > -     unsigned int pmp_idx = 0;
> > +     unsigned long pmp_addr, pmp_addr_max;
> > +     unsigned int pmp_log2gran, pmp_bits;
> > +     unsigned int pmp_idx, pmp_count;
> >       unsigned int pmp_flags;
> > -     unsigned long pmp_addr;
> >
> > +     pmp_count = sbi_hart_pmp_count(scratch);
> > +     pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
> > +     pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
> > +     pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
> > +
> > +     pmp_idx = 0;
> >       sbi_domain_for_each_memregion(dom, reg) {
> >               if (!is_valid_pmp_idx(pmp_count, pmp_idx))
> >                       return SBI_EFAIL;
> > @@ -478,43 +552,19 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
> >               }
> >       }
> >
> > +     sbi_hart_pmp_fence();
> >       return 0;
> >  }
> >
> >  int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
> >  {
> > -     /* shared R/W access for M and S/U mode */
> > -     unsigned int pmp_flags = (PMP_W | PMP_X);
> > -     unsigned long order, base = 0;
> >       struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
> >
> >       /* If Smepmp is not supported no special mapping is required */
> >       if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
> >               return SBI_OK;
> >
> > -     if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
> > -             return SBI_ENOSPC;
> > -
> > -     for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
> > -          order <= __riscv_xlen; order++) {
> > -             if (order < __riscv_xlen) {
> > -                     base = addr & ~((1UL << order) - 1UL);
> > -                     if ((base <= addr) &&
> > -                         (addr < (base + (1UL << order))) &&
> > -                         (base <= (addr + size - 1UL)) &&
> > -                         ((addr + size - 1UL) < (base + (1UL << order))))
> > -                             break;
> > -             } else {
> > -                     return SBI_EFAIL;
> > -             }
> > -     }
> > -
> > -     sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
> > -                          SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
> > -                          pmp_flags, base, order);
> > -     pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
> > -
> > -     return SBI_OK;
> > +     return sbi_hart_smepmp_map_range(scratch, addr, size);
> >  }
> >
> >  int sbi_hart_unmap_saddr(void)
> > @@ -524,53 +574,18 @@ int sbi_hart_unmap_saddr(void)
> >       if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
> >               return SBI_OK;
> >
> > -     sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
> > -     return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
> > +     return sbi_hart_smepmp_unmap_range(scratch, 0, 0);
> >  }
> >
> >  int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
> >  {
> > -     int rc;
> > -     unsigned int pmp_bits, pmp_log2gran;
> > -     unsigned int pmp_count = sbi_hart_pmp_count(scratch);
> > -     unsigned long pmp_addr_max;
> > -
> > -     if (!pmp_count)
> > +     if (!sbi_hart_pmp_count(scratch))
> >               return 0;
> >
> > -     pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
> > -     pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
> > -     pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
> > -
> >       if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
> > -             rc = sbi_hart_smepmp_configure(scratch, pmp_count,
> > -                                             pmp_log2gran, pmp_addr_max);
> > +             return sbi_hart_smepmp_configure(scratch);
> >       else
> > -             rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
> > -                                             pmp_log2gran, pmp_addr_max);
> > -
> > -     /*
> > -      * As per section 3.7.2 of privileged specification v1.12,
> > -      * virtual address translations can be speculatively performed
> > -      * (even before actual access). These, along with PMP translations,
> > -      * can be cached. This can pose a problem with CPU hotplug
> > -      * and non-retentive suspend scenarios because PMP states are
> > -      * not preserved.
> > -      * It is advisable to flush the caching structures under such
> > -      * conditions.
> > -      */
> > -     if (misa_extension('S')) {
> > -             __asm__ __volatile__("sfence.vma");
> > -
> > -             /*
> > -              * If hypervisor mode is supported, flush caching
> > -              * structures in guest mode too.
> > -              */
> > -             if (misa_extension('H'))
> > -                     __sbi_hfence_gvma_all();
> > -     }
> > -
> > -     return rc;
> > +             return sbi_hart_oldpmp_configure(scratch);
> >  }
> >
> >  void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
> > @@ -587,6 +602,42 @@ void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
> >       }
> >  }
> >
> > +static struct sbi_hart_protection pmp_protection = {
> > +     .name = "pmp",
> > +     .rating = 100,
> > +     .configure = sbi_hart_oldpmp_configure,
> > +     .unconfigure = sbi_hart_pmp_unconfigure,
> > +};
> > +
> > +static struct sbi_hart_protection epmp_protection = {
> > +     .name = "epmp",
> > +     .rating = 200,
> > +     .configure = sbi_hart_smepmp_configure,
> > +     .unconfigure = sbi_hart_pmp_unconfigure,
> > +     .map_range = sbi_hart_smepmp_map_range,
> > +     .unmap_range = sbi_hart_smepmp_unmap_range,
> > +};
> > +
> > +static int sbi_hart_pmp_init(struct sbi_scratch *scratch)
> > +{
> > +     int rc;
> > +
> > +     if (!sbi_hart_pmp_count(scratch))
> > +             return 0;
> > +
> > +     rc = sbi_hart_protection_register(&pmp_protection);
> > +     if (rc)
> > +             return rc;
> > +
> > +     if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) {
> > +             rc = sbi_hart_protection_register(&epmp_protection);
> > +             if (rc)
> > +                     return rc;
> > +     }
>
> The only reason I can see for registering both PMP and ePMP is to fall back to
> PMP if ePMP configuration fails (as ePMP requires more slots). But you don't do
> that. We shouldn't register PMP if we know it will not be used.
>

Your suggestion makes sense. I will update.
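
Something along these lines, perhaps (rough sketch, untested), where
sbi_hart_pmp_init() registers only the protection scheme that will
actually be used on the hart:

static int sbi_hart_pmp_init(struct sbi_scratch *scratch)
{
	if (!sbi_hart_pmp_count(scratch))
		return 0;

	/*
	 * Register only one protection scheme: ePMP when the Smepmp
	 * extension is available, plain PMP otherwise.
	 */
	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
		return sbi_hart_protection_register(&epmp_protection);

	return sbi_hart_protection_register(&pmp_protection);
}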

Regards,
Anup