[PATCH v8 36/43] arm64: mm: Add support for folding PUDs at runtime
Ryan Roberts
ryan.roberts at arm.com
Mon Sep 30 07:36:23 PDT 2024
Hi Ard,
On 14/02/2024 12:29, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb at kernel.org>
>
> In order to support LPA2 on 16k pages in a way that permits non-LPA2
> systems to run the same kernel image, we have to be able to fall back to
> at most 48 bits of virtual addressing.
>
> Falling back to 48 bits would result in a level 0 with only 2 entries,
> which is suboptimal in terms of TLB utilization. So instead, let's fall
> back to 47 bits in that case. This means we need to be able to fold PUDs
> dynamically, similar to how we fold P4Ds for 48 bit virtual addressing
> on LPA2 with 4k pages.
>
> Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
[...]
>
> +#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
> +
> +static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
> +{
> + return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
> +}
> +
I wonder if you could explain what this function (and its equivalents at other
levels) is doing? Why isn't it just returning p4dp cast to a (pud_t *)?
I'm working on a prototype for boot-time page size selection. For this, I'm
compile-time enabling all levels, then run-time folding the ones I don't need,
based on the selected page size and VA size.
I'm trying to reuse your run-time folding code, but I have a case where this
function is broken as written. Replacing with "return (pud_t *)p4dp;" resolves
the problem. If VA_BITS=48 and pagesize=64K, the pgd has 64 entries. p4dp is
pointing to the correct entry in the pgd already, but this code aligns back to
the start of the page, then adds pud_index(), which is wrong because
PTRS_PER_PUD != PTRS_PER_PGD. (In my case, these 2 macros are actually
boot-time selected values rather than compile-time constants).
I think your code is probably correct and working around PTRS_PER_PXD being
compile-time constants for the non-folded case, but I can't quite convince myself.
Thanks,
Ryan
> static inline pud_t *p4d_pgtable(p4d_t p4d)
> {
> return (pud_t *)__va(p4d_page_paddr(p4d));
> }
>
> -/* Find an entry in the first-level page table. */
> -#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
> +static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
> +{
> + BUG_ON(!pgtable_l4_enabled());
>
> -#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
> -#define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr))
> -#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
> + return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
> +}
>
> -#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
> +static inline
> +pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
> +{
> + if (!pgtable_l4_enabled())
> + return p4d_to_folded_pud(p4dp, addr);
> + return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
> +}
> +#define pud_offset_lockless pud_offset_lockless
> +
> +static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
> +{
> + return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
> +}
> +#define pud_offset pud_offset
> +
> +static inline pud_t *pud_set_fixmap(unsigned long addr)
> +{
> + if (!pgtable_l4_enabled())
> + return NULL;
> + return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
> +}
> +
> +static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
> +{
> + if (!pgtable_l4_enabled())
> + return p4d_to_folded_pud(p4dp, addr);
> + return pud_set_fixmap(pud_offset_phys(p4dp, addr));
> +}
> +
> +static inline void pud_clear_fixmap(void)
> +{
> + if (pgtable_l4_enabled())
> + clear_fixmap(FIX_PUD);
> +}
>
> /* use ONLY for statically allocated translation tables */
> -#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
> +static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
> +{
> + if (!pgtable_l4_enabled())
> + return p4d_to_folded_pud(p4dp, addr);
> + return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
> +}
> +
> +#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
>
> #else
>
> +static inline bool pgtable_l4_enabled(void) { return false; }
> +
> #define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
>
> /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
> diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
> index 0150deb332af..a947c6e784ed 100644
> --- a/arch/arm64/include/asm/tlb.h
> +++ b/arch/arm64/include/asm/tlb.h
> @@ -103,6 +103,9 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
> {
> struct ptdesc *ptdesc = virt_to_ptdesc(pudp);
>
> + if (!pgtable_l4_enabled())
> + return;
> +
> pagetable_pud_dtor(ptdesc);
> tlb_remove_ptdesc(tlb, ptdesc);
> }
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index bc5e4e569864..94f035f6c421 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -1767,6 +1767,8 @@ static int __init __kpti_install_ng_mappings(void *__unused)
>
> if (levels == 5 && !pgtable_l5_enabled())
> levels = 4;
> + else if (levels == 4 && !pgtable_l4_enabled())
> + levels = 3;
>
> remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 8e5b3a7c5afd..b131ed31a6c8 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -1065,7 +1065,7 @@ static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
> free_empty_pmd_table(pudp, addr, next, floor, ceiling);
> } while (addr = next, addr < end);
>
> - if (CONFIG_PGTABLE_LEVELS <= 3)
> + if (!pgtable_l4_enabled())
> return;
>
> if (!pgtable_range_aligned(start, end, floor, ceiling, P4D_MASK))
> diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
> index 3c4f8a279d2b..0c501cabc238 100644
> --- a/arch/arm64/mm/pgd.c
> +++ b/arch/arm64/mm/pgd.c
> @@ -21,6 +21,8 @@ static bool pgdir_is_page_size(void)
> {
> if (PGD_SIZE == PAGE_SIZE)
> return true;
> + if (CONFIG_PGTABLE_LEVELS == 4)
> + return !pgtable_l4_enabled();
> if (CONFIG_PGTABLE_LEVELS == 5)
> return !pgtable_l5_enabled();
> return false;
More information about the linux-arm-kernel
mailing list