[PATCH 2/3] arm64: pageattr: Use walk_page_range_novma() to change memory permissions
Lorenzo Stoakes
lorenzo.stoakes at oracle.com
Fri Jun 6 02:49:22 PDT 2025
On Fri, May 30, 2025 at 02:34:06PM +0530, Dev Jain wrote:
> Move away from apply_to_page_range(), which does not honour leaf mappings,
to walk_page_range_novma(). The callbacks emit a warning and return -EINVAL
if a partial range is detected.
Hm, a follow-up question here - why not just improve apply_to_page_range() to
honour leaf mappings?
What does honouring leaf mappings actually mean? You mean handling huge pages?
Would it be all that difficult to implement?
It seems like you're pushing a bunch of the 'applying' logic over from there to
a walker that maybe isn't best suited to it, and having to introduce an iffy new
form of locking...
Can we go vice-versa? :)
Also obviously walk_page_range_novma() doesn't exist any more :P
walk_kernel_page_table_range() is the preferred solution.
>
> Signed-off-by: Dev Jain <dev.jain at arm.com>
> ---
> arch/arm64/mm/pageattr.c | 69 +++++++++++++++++++++++++++++++++++++---
> 1 file changed, 64 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 39fd1f7ff02a..a5c829c64969 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -8,6 +8,7 @@
> #include <linux/mem_encrypt.h>
> #include <linux/sched.h>
> #include <linux/vmalloc.h>
> +#include <linux/pagewalk.h>
>
> #include <asm/cacheflush.h>
> #include <asm/pgtable-prot.h>
> @@ -20,6 +21,67 @@ struct page_change_data {
> pgprot_t clear_mask;
> };
>
> +static pteval_t set_pageattr_masks(unsigned long val, struct mm_walk *walk)
> +{
> + struct page_change_data *masks = walk->private;
> + unsigned long new_val = val;
> +
> + new_val &= ~(pgprot_val(masks->clear_mask));
> + new_val |= (pgprot_val(masks->set_mask));
> +
> + return new_val;
> +}
> +
> +static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
> + unsigned long next, struct mm_walk *walk)
> +{
> + pud_t val = pudp_get(pud);
> +
> + if (pud_leaf(val)) {
> + if (WARN_ON_ONCE((next - addr) != PUD_SIZE))
> + return -EINVAL;
> + val = __pud(set_pageattr_masks(pud_val(val), walk));
> + set_pud(pud, val);
> + walk->action = ACTION_CONTINUE;
> + }
> +
> + return 0;
> +}
> +
> +static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
> + unsigned long next, struct mm_walk *walk)
> +{
> + pmd_t val = pmdp_get(pmd);
> +
> + if (pmd_leaf(val)) {
> + if (WARN_ON_ONCE((next - addr) != PMD_SIZE))
> + return -EINVAL;
> + val = __pmd(set_pageattr_masks(pmd_val(val), walk));
> + set_pmd(pmd, val);
> + walk->action = ACTION_CONTINUE;
> + }
> +
> + return 0;
> +}
> +
> +static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
> + unsigned long next, struct mm_walk *walk)
> +{
> + pte_t val = ptep_get(pte);
> +
> + val = __pte(set_pageattr_masks(pte_val(val), walk));
> + set_pte(pte, val);
> +
> + return 0;
> +}
> +
> +static const struct mm_walk_ops pageattr_ops = {
> + .pud_entry = pageattr_pud_entry,
> + .pmd_entry = pageattr_pmd_entry,
> + .pte_entry = pageattr_pte_entry,
> + .walk_lock = PGWALK_NOLOCK,
> +};
> +
> bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
>
> bool can_set_direct_map(void)
> @@ -49,9 +111,6 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> return 0;
> }
>
> -/*
> - * This function assumes that the range is mapped with PAGE_SIZE pages.
> - */
> static int __change_memory_common(unsigned long start, unsigned long size,
> pgprot_t set_mask, pgprot_t clear_mask)
> {
> @@ -61,8 +120,8 @@ static int __change_memory_common(unsigned long start, unsigned long size,
> data.set_mask = set_mask;
> data.clear_mask = clear_mask;
>
> - ret = apply_to_page_range(&init_mm, start, size, change_page_range,
> - &data);
> + ret = walk_page_range_novma(&init_mm, start, start + size,
> + &pageattr_ops, NULL, &data);
>
> /*
> * If the memory is being made valid without changing any other bits
> --
> 2.30.2
>
More information about the linux-arm-kernel
mailing list