[PATCH 2/2] arm64: mm: use XN table mapping attributes for the linear region

From: Ard Biesheuvel <ardb at kernel.org>
Date: Fri Mar 5 08:13:26 GMT 2021


On Thu, 4 Mar 2021 at 18:39, Mark Rutland <mark.rutland at arm.com> wrote:
>
> On Thu, Mar 04, 2021 at 06:11:45PM +0100, Ard Biesheuvel wrote:
> > The way the arm64 kernel virtual address space is constructed guarantees
> > that swapper PGD entries are never shared between the linear region on
> > the one hand, and the vmalloc region on the other, which is where all
> > kernel text, module text and BPF text mappings reside.
> >
> > This means that mappings in the linear region (which never require
> > executable permissions) never share any table entries at any level with
> > mappings that do require executable permissions, and so we can set the
> > table-level PXN/UXN attributes for all table entries that are created
> > while setting up mappings in the linear region. Since swapper's PGD
> > level page table is mapped r/o itself, this adds another layer of
> > robustness to the way the kernel manages its own page tables.
> >
> > Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
> > ---
> >  arch/arm64/include/asm/pgtable-hwdef.h |  3 +++
> >  arch/arm64/mm/mmu.c                    | 27 +++++++++++++++-----
> >  2 files changed, 23 insertions(+), 7 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
> > index e64e77a345b2..b6f50abd63d7 100644
> > --- a/arch/arm64/include/asm/pgtable-hwdef.h
> > +++ b/arch/arm64/include/asm/pgtable-hwdef.h
> > @@ -101,6 +101,7 @@
> >  #define P4D_TYPE_MASK                (_AT(p4dval_t, 3) << 0)
> >  #define P4D_TYPE_SECT                (_AT(p4dval_t, 1) << 0)
> >  #define P4D_SECT_RDONLY              (_AT(p4dval_t, 1) << 7)         /* AP[2] */
> > +#define P4D_TABLE_XN         (_AT(p4dval_t, 3) << 59)        /* UXN:PXN */
>
> I reckon it's worth having separate P?D_TABLE_UXN and P?D_TABLE_PXN
> definitions, so that it's easier/possible to:
>
> * Set UXNTable for *all* kernel mappings
>
> * Set PXNTable for *all* user mappings
>
> ... though we don't need to do either immediately.
>
> Otherwise, this looks good to me, and with or without that split:
>
> Acked-by: Mark Rutland <mark.rutland at arm.com>
>

Yeah, it's probably better to split them right away.
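Something like this, I suppose (just a sketch - the table descriptor's
PXNTable and UXNTable controls sit at bits 59 and 60 respectively, so this
simply splits the current '3 << 59' in two):

#define PMD_TABLE_PXN		(_AT(pmdval_t, 1) << 59)	/* PXNTable */
#define PMD_TABLE_UXN		(_AT(pmdval_t, 1) << 60)	/* UXNTable */

with matching PUD_TABLE_PXN/PUD_TABLE_UXN and P4D_TABLE_PXN/P4D_TABLE_UXN
definitions at the other levels.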

> >
> >  /*
> >   * Level 1 descriptor (PUD).
> > @@ -110,6 +111,7 @@
> >  #define PUD_TYPE_MASK                (_AT(pudval_t, 3) << 0)
> >  #define PUD_TYPE_SECT                (_AT(pudval_t, 1) << 0)
> >  #define PUD_SECT_RDONLY              (_AT(pudval_t, 1) << 7)         /* AP[2] */
> > +#define PUD_TABLE_XN         (_AT(pudval_t, 3) << 59)        /* UXN:PXN */
> >
> >  /*
> >   * Level 2 descriptor (PMD).
> > @@ -131,6 +133,7 @@
> >  #define PMD_SECT_CONT                (_AT(pmdval_t, 1) << 52)
> >  #define PMD_SECT_PXN         (_AT(pmdval_t, 1) << 53)
> >  #define PMD_SECT_UXN         (_AT(pmdval_t, 1) << 54)
> > +#define PMD_TABLE_XN         (_AT(pmdval_t, 3) << 59)        /* UXN:PXN */
> >
> >  /*
> >   * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
> > diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> > index 029091474042..ff5cbca403be 100644
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -39,6 +39,7 @@
> >
> >  #define NO_BLOCK_MAPPINGS    BIT(0)
> >  #define NO_CONT_MAPPINGS     BIT(1)
> > +#define NO_EXEC_MAPPINGS     BIT(2)
> >
> >  u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
> >  u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
> > @@ -185,10 +186,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
> >
> >       BUG_ON(pmd_sect(pmd));
> >       if (pmd_none(pmd)) {
> > +             pmdval_t pmdval = PMD_TYPE_TABLE;
> >               phys_addr_t pte_phys;
> > +
> > +             if (flags & NO_EXEC_MAPPINGS)
> > +                     pmdval |= PMD_TABLE_XN;
> >               BUG_ON(!pgtable_alloc);
> >               pte_phys = pgtable_alloc(PAGE_SHIFT);
> > -             __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
> > +             __pmd_populate(pmdp, pte_phys, pmdval);
> >               pmd = READ_ONCE(*pmdp);
> >       }
> >       BUG_ON(pmd_bad(pmd));
> > @@ -259,10 +264,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
> >        */
> >       BUG_ON(pud_sect(pud));
> >       if (pud_none(pud)) {
> > +             pudval_t pudval = PUD_TYPE_TABLE;
> >               phys_addr_t pmd_phys;
> > +
> > +             if (flags & NO_EXEC_MAPPINGS)
> > +                     pudval |= PUD_TABLE_XN;
> >               BUG_ON(!pgtable_alloc);
> >               pmd_phys = pgtable_alloc(PMD_SHIFT);
> > -             __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
> > +             __pud_populate(pudp, pmd_phys, pudval);
> >               pud = READ_ONCE(*pudp);
> >       }
> >       BUG_ON(pud_bad(pud));
> > @@ -306,10 +315,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
> >       p4d_t p4d = READ_ONCE(*p4dp);
> >
> >       if (p4d_none(p4d)) {
> > +             p4dval_t p4dval = P4D_TYPE_TABLE;
> >               phys_addr_t pud_phys;
> > +
> > +             if (flags & NO_EXEC_MAPPINGS)
> > +                     p4dval |= P4D_TABLE_XN;
> >               BUG_ON(!pgtable_alloc);
> >               pud_phys = pgtable_alloc(PUD_SHIFT);
> > -             __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
> > +             __p4d_populate(p4dp, pud_phys, p4dval);
> >               p4d = READ_ONCE(*p4dp);
> >       }
> >       BUG_ON(p4d_bad(p4d));
> > @@ -489,11 +502,11 @@ static void __init map_mem(pgd_t *pgdp)
> >       phys_addr_t kernel_start = __pa_symbol(_stext);
> >       phys_addr_t kernel_end = __pa_symbol(__init_begin);
> >       phys_addr_t start, end;
> > -     int flags = 0;
> > +     int flags = NO_EXEC_MAPPINGS;
> >       u64 i;
> >
> >       if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
> > -             flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> > +             flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> >
> >       /*
> >        * Take care not to create a writable alias for the
> > @@ -1462,7 +1475,7 @@ struct range arch_get_mappable_range(void)
> >  int arch_add_memory(int nid, u64 start, u64 size,
> >                   struct mhp_params *params)
> >  {
> > -     int ret, flags = 0;
> > +     int ret, flags = NO_EXEC_MAPPINGS;
> >
> >       VM_BUG_ON(!mhp_range_allowed(start, size, true));
> >
> > @@ -1472,7 +1485,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
> >        */
> >       if (rodata_full || debug_pagealloc_enabled() ||
> >           IS_ENABLED(CONFIG_KFENCE))
> > -             flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> > +             flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
> >
> >       __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
> >                            size, params->pgprot, __pgd_pgtable_alloc,
> > --
> > 2.20.1
> >
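To be explicit about what I have in mind for the respin (again, only a
sketch): with the definitions split, the table population helpers would
simply OR in both halves for the linear region, e.g. in
alloc_init_cont_pte():

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN | PMD_TABLE_UXN;

and the equivalent PUD_TABLE_*/P4D_TABLE_* pairs in alloc_init_cont_pmd()
and alloc_init_pud().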


