[PATCH v4 4/4] arm64: Add batched versions of ptep_modify_prot_start/commit
Ryan Roberts
ryan.roberts at arm.com
Mon Jun 30 03:43:23 PDT 2025
On 28/06/2025 12:34, Dev Jain wrote:
> Override the generic definition of modify_prot_start_ptes() to use
> get_and_clear_full_ptes(). This helper does a TLBI only for the starting
> and ending contpte blocks of the range, whereas the current implementation
> calls ptep_get_and_clear() on each contpte block and therefore issues a
> TLBI per block. This gives us a performance win.
>
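(For context: the generic fallback added earlier in this series still walks
the range one pte at a time, accumulating access/dirty into the returned
pte -- roughly the sketch below, from memory rather than the exact mm/
code. On arm64 each of those per-pte clears can land in a contpte block
and cost a TLBI, which is exactly what this override avoids:

	pte = ptep_modify_prot_start(vma, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
		/* carry access/dirty bits from every pte in the batch */
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}

get_and_clear_full_ptes() performs the same accumulation in a single pass.)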
> The arm64 definition of pte_accessible() allows us to batch in the
> errata-specific case:
>
> #define pte_accessible(mm, pte)	\
> 	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
>
> All ptes in the folio batch are present, and they are also valid.
>
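(Worth spelling out why the single returned pte can answer for the whole
range: folio batching only groups ptes that map the same folio with
identical prot bits (access/dirty may vary), so for the value returned by
get_and_clear_full_ptes() both arms of the macro hold:

	/* sketch: pte is the batched return value */
	mm_tlb_flush_pending(mm) ? pte_present(pte)	/* true: batch is present */
				 : pte_valid(pte);	/* true: batch is valid */

Either way pte_accessible() is true, so the errata check below reduces to
pte_user_exec() for the whole batch.)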
> Override the generic definition of modify_prot_commit_ptes() to simply
> use set_ptes() to map the new ptes into the page table.
>
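(For reference, the intended caller pattern -- a sketch modelled on the
mprotect path, not the exact mm/mprotect.c code:

	oldpte = modify_prot_start_ptes(vma, addr, ptep, nr);
	ptent = pte_modify(oldpte, newprot);
	/* ... fix up write/dirty bits on ptent as needed ... */
	modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr);

modify_prot_start_ptes() leaves the ptes cleared while the new protection
is computed; modify_prot_commit_ptes() then installs the new value for all
nr entries via set_ptes().)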
> Signed-off-by: Dev Jain <dev.jain at arm.com>
Reviewed-by: Ryan Roberts <ryan.roberts at arm.com>
> ---
>  arch/arm64/include/asm/pgtable.h | 10 ++++++++++
>  arch/arm64/mm/mmu.c              | 28 +++++++++++++++++++++++-----
> 2 files changed, 33 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index ba63c8736666..abd2dee416b3 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -1643,6 +1643,16 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
>  				    unsigned long addr, pte_t *ptep,
>  				    pte_t old_pte, pte_t new_pte);
>
> +#define modify_prot_start_ptes modify_prot_start_ptes
> +extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
> +				    unsigned long addr, pte_t *ptep,
> +				    unsigned int nr);
> +
> +#define modify_prot_commit_ptes modify_prot_commit_ptes
> +extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
> +				    pte_t *ptep, pte_t old_pte, pte_t pte,
> +				    unsigned int nr);
> +
> +
> #ifdef CONFIG_ARM64_CONTPTE
>
> /*
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 3d5fb37424ab..38325616f467 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -26,6 +26,7 @@
> #include <linux/set_memory.h>
> #include <linux/kfence.h>
> #include <linux/pkeys.h>
> +#include <linux/mm_inline.h>
>
> #include <asm/barrier.h>
> #include <asm/cputype.h>
> @@ -1524,24 +1525,41 @@ static int __init prevent_bootmem_remove_init(void)
> early_initcall(prevent_bootmem_remove_init);
> #endif
>
> -pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
> +pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr,
> +			     pte_t *ptep, unsigned int nr)
>  {
> +	pte_t pte = get_and_clear_full_ptes(vma->vm_mm, addr, ptep, nr, 0);
> +
>  	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
>  		/*
>  		 * Break-before-make (BBM) is required for all user space mappings
>  		 * when the permission changes from executable to non-executable
>  		 * in cases where cpu is affected with errata #2645198.
>  		 */
> -		if (pte_user_exec(ptep_get(ptep)))
> -			return ptep_clear_flush(vma, addr, ptep);
> +		if (pte_accessible(vma->vm_mm, pte) && pte_user_exec(pte))
> +			__flush_tlb_range(vma, addr, addr + nr * PAGE_SIZE,
> +					  PAGE_SIZE, true, 3);
>  	}
> -	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
> +
> +	return pte;
> +}
> +
> +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
> +{
> +	return modify_prot_start_ptes(vma, addr, ptep, 1);
> +}
> +
> +void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
> +			     pte_t *ptep, pte_t old_pte, pte_t pte,
> +			     unsigned int nr)
> +{
> +	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
>  }
>
>  void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
>  			     pte_t old_pte, pte_t pte)
>  {
> -	set_pte_at(vma->vm_mm, addr, ptep, pte);
> +	modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, 1);
>  }
>
> /*