[RFC PATCH 6/7] lib: sbi: sbi_hart: extend PMP handling to support multiple reserved entries

Yu-Chien Peter Lin peter.lin at sifive.com
Fri Aug 15 03:01:14 PDT 2025


Previously, OpenSBI supported only a single reserved PMP entry. This
change adds support for multiple reserved PMP entries, configurable
via the `reserved-pmp-count` DT property in the opensbi-config node.
Also print an error message when a domain memory region cannot be
protected because the remaining PMP entries are insufficient.

Signed-off-by: Yu-Chien Peter Lin <peter.lin at sifive.com>
---
 include/sbi/sbi_hart.h       | 15 ----------
 lib/sbi/sbi_domain_context.c |  6 +++-
 lib/sbi/sbi_hart.c           | 56 +++++++++++++++++++++++++++---------
 3 files changed, 47 insertions(+), 30 deletions(-)

diff --git a/include/sbi/sbi_hart.h b/include/sbi/sbi_hart.h
index 82b19dcf..86c2675b 100644
--- a/include/sbi/sbi_hart.h
+++ b/include/sbi/sbi_hart.h
@@ -101,21 +101,6 @@ enum sbi_hart_csrs {
 	SBI_HART_CSR_MAX,
 };
 
-/*
- * Smepmp enforces access boundaries between M-mode and
- * S/U-mode. When it is enabled, the PMPs are programmed
- * such that M-mode doesn't have access to S/U-mode memory.
- *
- * To give M-mode R/W access to the shared memory between M and
- * S/U-mode, first entry is reserved. It is disabled at boot.
- * When shared memory access is required, the physical address
- * should be programmed into the first PMP entry with R/W
- * permissions to the M-mode. Once the work is done, it should be
- * unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
- * pair should be used to map/unmap the shared memory.
- */
-#define SBI_SMEPMP_RESV_ENTRY		0
-
 struct sbi_hart_features {
 	bool detected;
 	int priv_version;
diff --git a/lib/sbi/sbi_domain_context.c b/lib/sbi/sbi_domain_context.c
index fb04d81d..a78bd28c 100644
--- a/lib/sbi/sbi_domain_context.c
+++ b/lib/sbi/sbi_domain_context.c
@@ -101,6 +101,7 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
 	struct sbi_domain *current_dom = ctx->dom;
 	struct sbi_domain *target_dom = dom_ctx->dom;
 	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
 	unsigned int pmp_count = sbi_hart_pmp_count(scratch);
 
 	/* Assign current hart to target domain */
@@ -115,7 +116,10 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
 	spin_unlock(&target_dom->assigned_harts_lock);
 
 	/* Reconfigure PMP settings for the new domain */
-	for (int i = 0; i < pmp_count; i++) {
+	for (int i = plat->reserved_pmp_count; i < pmp_count; i++) {
+		if (pmp_is_fw_region(i, current_dom))
+			continue;
+
-		sbi_platform_pmp_disable(sbi_platform_thishart_ptr(), i);
+		sbi_platform_pmp_disable(plat, i);
 		pmp_disable(i);
 	}
diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
index 6a2d7d6f..e8762084 100644
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -17,6 +17,7 @@
 #include <sbi/sbi_csr_detect.h>
 #include <sbi/sbi_error.h>
 #include <sbi/sbi_hart.h>
+#include <sbi/sbi_init.h>
 #include <sbi/sbi_math.h>
 #include <sbi/sbi_platform.h>
 #include <sbi/sbi_pmu.h>
@@ -30,6 +31,7 @@ extern void __sbi_expected_trap_hext(void);
 void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
 
 static unsigned long hart_features_offset;
+static unsigned int saddr_pmp_id;
 
 static void mstatus_init(struct sbi_scratch *scratch)
 {
@@ -393,6 +395,7 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
 				     unsigned long pmp_addr_max)
 {
 	struct sbi_domain_memregion *reg;
+	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
 	struct sbi_domain *dom = sbi_domain_thishart_ptr();
 	unsigned int pmp_idx, pmp_flags;
 
@@ -402,16 +405,20 @@
 	 */
 	csr_set(CSR_MSECCFG, MSECCFG_RLB);
 
-	/* Disable the reserved entry */
-	pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+	/* Disable the reserved entries */
+	for (int i = 0; i < plat->reserved_pmp_count; i++)
+		pmp_disable(i);
 
 	/* Program M-only regions when MML is not set. */
 	pmp_idx = 0;
 	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (pmp_count <= pmp_idx)
+		/* Skip the reserved entries */
+		if (pmp_idx < plat->reserved_pmp_count)
+			pmp_idx = plat->reserved_pmp_count;
+		if (pmp_count <= pmp_idx) {
+			sbi_printf("%s: ERR: region %#lx cannot be protected - "
+				   "insufficient PMP entries\n", __func__, reg->base);
 			break;
+		}
 
 		/* Skip shared and SU-only regions */
@@ -435,9 +441,12 @@
 	pmp_idx = 0;
 	sbi_domain_for_each_memregion(dom, reg) {
-		/* Skip reserved entry */
-		if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
-			pmp_idx++;
-		if (pmp_count <= pmp_idx)
+		/* Skip the reserved entries */
+		if (pmp_idx < plat->reserved_pmp_count)
+			pmp_idx = plat->reserved_pmp_count;
+		if (pmp_count <= pmp_idx) {
+			sbi_printf("%s: ERR: region %#lx cannot be protected - "
+				   "insufficient PMP entries\n", __func__, reg->base);
 			break;
+		}
 
 		/* Skip M-only regions */
@@ -468,13 +476,20 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
 				     unsigned long pmp_addr_max)
 {
 	struct sbi_domain_memregion *reg;
+	const struct sbi_platform *plat = sbi_platform_thishart_ptr();
 	struct sbi_domain *dom = sbi_domain_thishart_ptr();
 	unsigned int pmp_idx = 0;
 	unsigned int pmp_flags;
 	unsigned long pmp_addr;
 
 	sbi_domain_for_each_memregion(dom, reg) {
-		if (pmp_count <= pmp_idx)
+		/* Skip the reserved entries */
+		if (pmp_idx < plat->reserved_pmp_count)
+			pmp_idx = plat->reserved_pmp_count;
+		if (pmp_count <= pmp_idx) {
+			sbi_printf("%s: ERR: region %#lx cannot be protected - "
+				   "insufficient PMP entries\n", __func__, reg->base);
 			break;
+		}
 
 		pmp_flags = 0;
@@ -510,6 +524,19 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
 	return 0;
 }
 
+/*
+ * Smepmp enforces access boundaries between M-mode and
+ * S/U-mode. When it is enabled, the PMPs are programmed
+ * such that M-mode doesn't have access to S/U-mode memory.
+ *
+ * To give M-mode R/W access to the shared memory between M and
+ * S/U-mode, a set of high-priority PMP entries is reserved; they
+ * are disabled at boot. When shared memory access is required, a
+ * reserved entry is allocated and programmed with the physical
+ * address and M-mode R/W permissions. Once the work is done, it
+ * should be unmapped. The sbi_hart_map_saddr/sbi_hart_unmap_saddr
+ * function pair should be used to map/unmap the shared memory.
+ */
 int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
 {
 	/* shared R/W access for M and S/U mode */
@@ -521,8 +548,8 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
 	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
 		return SBI_OK;
 
-	if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
+	if (reserved_pmp_alloc(&saddr_pmp_id))
 		return SBI_ENOSPC;
 
 	for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
 	     order <= __riscv_xlen; order++) {
@@ -538,10 +566,10 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
 		}
 	}
 
-	sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
+	sbi_platform_pmp_set(sbi_platform_ptr(scratch), saddr_pmp_id,
 			     SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
 			     pmp_flags, base, order);
-	pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
+	pmp_set(saddr_pmp_id, pmp_flags, base, order);
 
 	return SBI_OK;
 }
@@ -553,8 +581,8 @@ int sbi_hart_unmap_saddr(void)
 	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
 		return SBI_OK;
 
-	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
-	return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), saddr_pmp_id);
+	return pmp_disable(saddr_pmp_id);
 }
 
 int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
-- 
2.48.0




More information about the opensbi mailing list