[RFC PATCH V1 03/11] riscv: Adapt pte struct to gap between hw page and sw page
Xu Lu
luxu.kernel at bytedance.com
Wed Nov 22 22:57:00 PST 2023
The pte_t struct maps a virtual page to a physical page, both of which
are software pages from the perspective of kernel mm. In contrast, each
page table entry refers to a single hardware page. When the software
page is larger than the hardware page, the existing pte_t struct, which
holds only one page table entry, can no longer represent a software page.
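For example, with a 4 KiB hardware page (HW_PAGE_SHIFT == 12) and a
hypothetical 64 KiB software page (PAGE_SHIFT == 16), one software page
covers 1 << (16 - 12) = 16 hardware page table entries, so a single
entry describes only a sixteenth of it.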
This commit extends the pte_t struct to contain an array of page table
entries. The pte_t struct now maps a software page to a fixed number of
consecutive hardware pages, whose total size still matches the software
page size.
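As an illustrative sketch (reusing the 16-entry example above, which is
an assumption, not part of this patch), a present, non-NAPOT pte built
from a value whose PFN is P fans out as:

	ptes[0]  = ((P +  0) << _PAGE_PFN_SHIFT) | flags
	ptes[1]  = ((P +  1) << _PAGE_PFN_SHIFT) | flags
	...
	ptes[15] = ((P + 15) << _PAGE_PFN_SHIFT) | flags

so the hardware walker sees 16 consecutive hardware pages whose total
size equals one software page.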
Signed-off-by: Xu Lu <luxu.kernel at bytedance.com>
---
arch/riscv/include/asm/page.h | 7 ++-
arch/riscv/include/asm/pgtable-64.h | 3 +-
arch/riscv/include/asm/pgtable.h | 91 ++++++++++++++++++++++++++---
arch/riscv/kernel/efi.c | 2 +-
arch/riscv/mm/hugetlbpage.c | 2 +-
arch/riscv/mm/pageattr.c | 2 +-
6 files changed, 92 insertions(+), 15 deletions(-)
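Note: to see the pte fan-out from this patch in isolation, below is a
self-contained userspace sketch of the __pte() loop introduced in
pgtable.h. The PAGE_SHIFT/HW_PAGE_SHIFT values are the illustrative
assumptions from the commit message, and the _PAGE_* bit positions
mirror arch/riscv/include/asm/pgtable-bits.h; this is a demo, not the
kernel code itself.

	/* Userspace sketch of the __pte() fan-out (64-bit host assumed). */
	#include <stdio.h>

	#define HW_PAGE_SHIFT	12	/* 4 KiB hardware page */
	#define PAGE_SHIFT	16	/* 64 KiB software page (assumed) */
	#define PTES_PER_PAGE	(1 << (PAGE_SHIFT - HW_PAGE_SHIFT))

	#define _PAGE_PRESENT	(1UL << 0)
	#define _PAGE_PROT_NONE	(1UL << 5)	/* aliases _PAGE_GLOBAL on riscv */
	#define _PAGE_NAPOT	(1UL << 63)
	#define _PAGE_PFN_SHIFT	10

	typedef struct { unsigned long ptes[PTES_PER_PAGE]; } pte_t;

	static int __pte_present(unsigned long v)
	{
		return v & (_PAGE_PRESENT | _PAGE_PROT_NONE);
	}

	static unsigned long __pte_napot(unsigned long v)
	{
		return v & _PAGE_NAPOT;
	}

	/*
	 * Same loop as the patched __pte(): a present, non-NAPOT value
	 * is replicated with consecutive hardware PFNs.
	 */
	static pte_t make_pte(unsigned long pteval)
	{
		pte_t pte;
		int i;

		for (i = 0; i < PTES_PER_PAGE; i++) {
			pte.ptes[i] = pteval;
			if (__pte_present(pteval) && !__pte_napot(pteval))
				pteval += 1UL << _PAGE_PFN_SHIFT;
		}
		return pte;
	}

	int main(void)
	{
		pte_t pte = make_pte((0x1234UL << _PAGE_PFN_SHIFT) | _PAGE_PRESENT);
		int i;

		for (i = 0; i < PTES_PER_PAGE; i++)
			printf("ptes[%2d]: pfn=0x%lx\n",
			       i, pte.ptes[i] >> _PAGE_PFN_SHIFT);
		return 0;
	}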
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index a8c59d80683c..cbaa7e027f9a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -68,9 +68,11 @@ typedef struct {
unsigned long pgd;
} pgd_t;
+#define PTES_PER_PAGE (1 << (PAGE_SHIFT - HW_PAGE_SHIFT))
+
/* Page Table entry */
typedef struct {
- unsigned long pte;
+ unsigned long ptes[PTES_PER_PAGE];
} pte_t;
typedef struct {
@@ -79,11 +81,10 @@ typedef struct {
typedef struct page *pgtable_t;
-#define pte_val(x) ((x).pte)
+#define pte_val(x) ((x).ptes[0])
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
-#define __pte(x) ((pte_t) { (x) })
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 9a2c780a11e9..c08db54594a9 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -99,7 +99,8 @@ enum napot_cont_order {
#define for_each_napot_order_rev(order) \
for (order = NAPOT_ORDER_MAX - 1; \
order >= NAPOT_CONT_ORDER_BASE; order--)
-#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))
+#define napot_cont_order(val) \
+ (__builtin_ctzl((pte_val(val) >> _PAGE_PFN_SHIFT) << 1))
#define napot_cont_shift(order) ((order) + PAGE_SHIFT)
#define napot_cont_size(order) BIT(napot_cont_shift(order))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 294044429e8e..342be2112fd2 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -212,6 +212,31 @@ extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];
+static __always_inline int __pte_present(unsigned long pteval)
+{
+ return (pteval & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static __always_inline unsigned long __pte_napot(unsigned long pteval)
+{
+ return pteval & _PAGE_NAPOT;
+}
+
+static inline pte_t __pte(unsigned long pteval)
+{
+ pte_t pte;
+ unsigned int i;
+
+ for (i = 0; i < PTES_PER_PAGE; i++) {
+ pte.ptes[i] = pteval;
+ if (__pte_present(pteval) && !__pte_napot(pteval))
+ pteval += 1 << _PAGE_PFN_SHIFT;
+ }
+
+ return pte;
+}
+#define __pte __pte
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
@@ -300,7 +325,7 @@ static __always_inline bool has_svnapot(void)
static inline unsigned long pte_napot(pte_t pte)
{
- return pte_val(pte) & _PAGE_NAPOT;
+ return __pte_napot(pte_val(pte));
}
static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
@@ -350,7 +375,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
static inline int pte_present(pte_t pte)
{
- return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+ return __pte_present(pte_val(pte));
}
static inline int pte_none(pte_t pte)
@@ -439,6 +464,36 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ unsigned int i;
+ pte_t pte = *ptep;
+
+ for (i = 0; i < PTES_PER_PAGE; i++) {
+ if (pte.ptes[i] & _PAGE_DIRTY) {
+ pte = pte_mkdirty(pte);
+ break;
+ }
+ }
+ for (i = 0; i < PTES_PER_PAGE; i++) {
+ if (pte.ptes[i] & _PAGE_ACCESSED) {
+ pte = pte_mkyoung(pte);
+ break;
+ }
+ }
+
+ return pte;
+}
+#define ptep_get ptep_get
+
+static inline pte_t ptep_get_lockless(pte_t *ptep)
+{
+ unsigned long pteval = READ_ONCE(ptep->ptes[0]);
+
+ return __pte(pteval);
+}
+#define ptep_get_lockless ptep_get_lockless
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/asm-generic/pgtable.h
@@ -526,6 +581,8 @@ static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr)
{
+ unsigned int i;
+
page_table_check_ptes_set(mm, ptep, pteval, nr);
for (;;) {
@@ -533,7 +590,10 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
if (--nr == 0)
break;
ptep++;
- pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+ if (pte_present(pteval) && !pte_napot(pteval)) {
+ for (i = 0; i < PTES_PER_PAGE; i++)
+ pteval.ptes[i] += PTES_PER_PAGE << _PAGE_PFN_SHIFT;
+ }
}
}
#define set_ptes set_ptes
@@ -562,7 +622,11 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+ pte_t pte;
+ unsigned int i;
+
+ for (i = 0; i < PTES_PER_PAGE; i++)
+ pte.ptes[i] = atomic_long_xchg((atomic_long_t *)(&ptep->ptes[i]), 0);
page_table_check_pte_clear(mm, pte);
@@ -574,16 +638,27 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
- if (!pte_young(*ptep))
+ int ret = 0;
+ unsigned int i;
+
+ if (!pte_young(ptep_get(ptep)))
return 0;
- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+
+ for (i = 0; i < PTES_PER_PAGE; i++)
+ ret |= test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &ptep->ptes[i]);
+
+ return ret;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+ unsigned int i;
+
+ for (i = 0; i < PTES_PER_PAGE; i++)
+ atomic_long_and(~(unsigned long)_PAGE_WRITE,
+ (atomic_long_t *)(&ptep->ptes[i]));
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -829,7 +904,7 @@ extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+#define __swp_entry_to_pte(x) __pte((x).val)
static inline int pte_swp_exclusive(pte_t pte)
{
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
index aa6209a74c83..b64bf1624a05 100644
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
unsigned long val;
if (md->attribute & EFI_MEMORY_RO) {
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index d7cf8e2d3c5b..67fd71c36853 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -293,7 +293,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
int i, pte_num;
if (!pte_napot(pte)) {
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index fc5fc4f785c4..b8e30df2e7df 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pte_t val = READ_ONCE(*pte);
+ pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
--
2.20.1