[PATCH] lib: sbi: Flush cache entries after writing PMP CSRs

cp0613 at linux.alibaba.com cp0613 at linux.alibaba.com
Thu Sep 4 02:54:47 PDT 2025


From: Chen Pei <cp0613 at linux.alibaba.com>

As the privileged specification states, after writing to the PMP CSRs,
an SFENCE.VMA or HFENCE.GVMA instruction should be executed with rs1=x0
and rs2=x0 to flush all address translation cache entries.

The original implementation does not cover all possible cases. For
example, the sbi_hart_map_saddr function calls pmp_set but does not
execute the SFENCE.VMA instruction. This patch covers sbi_hart_map_saddr
and sbi_hart_unmap_saddr, ensuring that dbtr, sse and other modules can
safely update pmpcfg.

Signed-off-by: Chen Pei <cp0613 at linux.alibaba.com>
---
 lib/sbi/sbi_hart.c | 54 ++++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 21 deletions(-)

diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
index 6a2d7d6..33bba38 100644
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -364,6 +364,30 @@ static unsigned int sbi_hart_get_smepmp_flags(struct sbi_scratch *scratch,
 	return pmp_flags;
 }
 
+static void pmp_flush(void)
+{
+	/*
+	 * As per section 3.7.2 of privileged specification v1.12,
+	 * virtual address translations can be speculatively performed
+	 * (even before actual access). These, along with PMP translations,
+	 * can be cached. This can pose a problem with CPU hotplug
+	 * and non-retentive suspend scenarios because PMP states are
+	 * not preserved.
+	 * It is advisable to flush the caching structures under such
+	 * conditions.
+	 */
+	if (misa_extension('S')) {
+		__asm__ __volatile__("sfence.vma");
+
+		/*
+		 * If hypervisor mode is supported, flush caching
+		 * structures in guest mode too.
+		 */
+		if (misa_extension('H'))
+			__sbi_hfence_gvma_all();
+	}
+}
+
 static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
 				struct sbi_domain *dom,
 				struct sbi_domain_memregion *reg,
@@ -543,18 +567,25 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
 			     pmp_flags, base, order);
 	pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
 
+	pmp_flush();
+
 	return SBI_OK;
 }
 
 int sbi_hart_unmap_saddr(void)
 {
+	int rc;
 	struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
 
 	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
 		return SBI_OK;
 
 	sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
-	return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+	rc = pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+
+	pmp_flush();
+
+	return rc;
 }
 
 int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
@@ -578,26 +609,7 @@ int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
 		rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
 						pmp_log2gran, pmp_addr_max);
 
-	/*
-	 * As per section 3.7.2 of privileged specification v1.12,
-	 * virtual address translations can be speculatively performed
-	 * (even before actual access). These, along with PMP traslations,
-	 * can be cached. This can pose a problem with CPU hotplug
-	 * and non-retentive suspend scenario because PMP states are
-	 * not preserved.
-	 * It is advisable to flush the caching structures under such
-	 * conditions.
-	 */
-	if (misa_extension('S')) {
-		__asm__ __volatile__("sfence.vma");
-
-		/*
-		 * If hypervisor mode is supported, flush caching
-		 * structures in guest mode too.
-		 */
-		if (misa_extension('H'))
-			__sbi_hfence_gvma_all();
-	}
+	pmp_flush();
 
 	return rc;
 }
-- 
2.49.0




More information about the opensbi mailing list