[PATCH 7/7] riscv: add Svnapot-aware pte_batch_hint support
Yunhui Cui
cuiyunhui at bytedance.com
Tue Apr 21 02:24:57 PDT 2026
Provide an Svnapot-specific pte_batch_hint() implementation so callers can
treat a contiguous napot range as a single batch instead of iterating over
it one entry at a time. Keep the public wrapper in pgtable.h and leave the
CONFIG-disabled case on the existing single-entry fallback.
Signed-off-by: Yunhui Cui <cuiyunhui at bytedance.com>
---
arch/riscv/include/asm/pgtable.h | 19 +++++++++++++++++-
arch/riscv/mm/contpte.c | 33 ++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index db82253efb218..264af77392c6e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -872,6 +872,13 @@ static inline bool __ptep_clear_flush_young(struct vm_area_struct *vma,
#define __ptep_clear_flush_young __ptep_clear_flush_young
/*
 * Generic fallback for pte_batch_hint(): with no napot information
 * available we can only promise a batch of one entry.  Both parameters
 * are ignored; they exist to match the pte_batch_hint() signature.
 */
static inline unsigned int __pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}

/* Signal to the wrappers below that an arch fallback is provided. */
#define __pte_batch_hint __pte_batch_hint
+
#ifdef CONFIG_RISCV_ISA_SVNAPOT
/*
@@ -886,6 +893,7 @@ void __napotpte_try_unfold(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t napotpte_ptep_get(pte_t *ptep, pte_t orig_pte);
pte_t napotpte_ptep_get_lockless(pte_t *ptep);
+unsigned int napotpte_pte_batch_hint(pte_t *ptep);
void napotpte_set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr);
void napotpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
@@ -1056,6 +1064,15 @@ static inline bool ptep_clear_flush_young(struct vm_area_struct *vma,
return napotpte_ptep_clear_flush_young(vma, address, ptep);
}
/*
 * pte_batch_hint - how many PTEs starting at @ptep may be processed as
 * a single batch.
 *
 * For a present mapping, defer to napotpte_pte_batch_hint(), which
 * reports the number of remaining consistent entries of the napot range
 * (or 1 when @ptep is not part of one).  Non-present entries can never
 * belong to a napot mapping, so they always batch as a single entry.
 */
#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_present(pte))
		return 1;

	return napotpte_pte_batch_hint(ptep);
}
#else /* CONFIG_RISCV_ISA_SVNAPOT */
static __always_inline bool riscv_pte_present_napot(pte_t pte)
@@ -1100,9 +1117,9 @@ napotpte_ptep_clear_flush_young(struct vm_area_struct *vma,
#define ptep_set_wrprotect __ptep_set_wrprotect
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young __ptep_clear_flush_young
+#define pte_batch_hint __pte_batch_hint
#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-
#define pgprot_nx pgprot_nx
static inline pgprot_t pgprot_nx(pgprot_t _prot)
{
diff --git a/arch/riscv/mm/contpte.c b/arch/riscv/mm/contpte.c
index 077ffa49e89d9..134b8c401cabc 100644
--- a/arch/riscv/mm/contpte.c
+++ b/arch/riscv/mm/contpte.c
@@ -187,6 +187,12 @@ static inline bool napotpte_is_consistent(pte_t pte, pte_t orig_pte)
pte_val(pte_mask_ad(pte)) == pte_val(pte_mask_ad(orig_pte));
}
/*
 * A PTE belongs to the same batch as @orig_pte when it is part of a
 * napot mapping and differs from @orig_pte at most in the accessed bit
 * (pte_mkold() clears A on both sides before comparing).
 *
 * NOTE(review): unlike napotpte_is_consistent() above, which masks both
 * the accessed and dirty bits, this only ignores the accessed bit, so a
 * partially-dirtied napot range will refuse to batch — confirm that is
 * the intended behaviour.
 */
static inline bool napotpte_is_batch_consistent(pte_t pte, pte_t orig_pte)
{
	return pte_present_napot(pte) &&
	       pte_val(pte_mkold(pte)) == pte_val(pte_mkold(orig_pte));
}
+
void __napotpte_try_fold(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
@@ -391,6 +397,33 @@ pte_t napotpte_ptep_get_lockless(pte_t *orig_ptep)
}
EXPORT_SYMBOL(napotpte_ptep_get_lockless);
/*
 * napotpte_pte_batch_hint - number of PTEs, from @ptep to the end of its
 * napot range, that callers may process as one batch.
 *
 * The scan is lockless: every remaining entry of the range is re-read
 * with READ_ONCE() and compared against *@ptep, and we fall back to a
 * single-entry batch as soon as any entry is inconsistent, so callers
 * never batch across a range that is concurrently being unfolded or
 * remapped.
 *
 * Returns the batch length in PTEs (always >= 1).
 */
unsigned int napotpte_pte_batch_hint(pte_t *ptep)
{
	pte_t orig_pte, pte;
	pte_t *start;
	unsigned int i, nr, off;

	/* Without hardware napot support nothing ever batches. */
	if (!napot_hw_supported())
		return 1;

	orig_pte = READ_ONCE(*ptep);
	if (!pte_present_napot(orig_pte))
		return 1;

	/*
	 * First entry, size, and our offset within the napot range.
	 * NOTE(review): napotpte_pte_num() takes no argument here, which
	 * assumes a single system-wide napot order — confirm against its
	 * definition.
	 */
	start = napot_align_ptep(ptep);
	nr = napotpte_pte_num();
	off = ptep - start;

	/*
	 * Start at @ptep itself: the i == off iteration re-reads *@ptep,
	 * so a modification racing with the READ_ONCE() of orig_pte above
	 * is also detected.
	 */
	for (i = off; i < nr; i++) {
		pte = READ_ONCE(start[i]);
		if (!napotpte_is_batch_consistent(pte, orig_pte))
			return 1;
	}

	return nr - off;
}
EXPORT_SYMBOL(napotpte_pte_batch_hint);
+
void napotpte_set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
--
2.39.5
More information about the linux-riscv
mailing list