[PATCH 2/2] arm64: mm: extend linear region for 52-bit VA configurations
Ard Biesheuvel
ardb at kernel.org
Wed Oct 7 14:00:36 EDT 2020
On Wed, 7 Oct 2020 at 00:50, Ard Biesheuvel <ardb at kernel.org> wrote:
>
> For historical reasons, the arm64 kernel VA space is configured as two
> equally sized halves, i.e., on a 48-bit VA build, the VA space is split
> into a 47-bit vmalloc region and a 47-bit linear region.
>
> When support for 52-bit virtual addressing was added, this equal split
> was kept, resulting in a substantial waste of virtual address space in
> the linear region:
>
>              48-bit VA                     52-bit VA
>   0xffff_ffff_ffff_ffff +-------------+     +-------------+
>                         |   vmalloc   |     |   vmalloc   |
>   0xffff_8000_0000_0000 +-------------+     +-------------+   <- _PAGE_END(48)
>                         |   linear    |     :             :
>   0xffff_0000_0000_0000 +-------------+     :             :
>                         :             :     :             :
>                         :             :     :             :
>                         :             :     :             :
>                         :             :     :  currently  :
>                         :  unusable   :     :             :
>                         :             :     :   unused    :
>                         :     by      :     :             :
>                         :             :     :             :
>                         :  hardware   :     :             :
>                         :             :     :             :
>   0xfff8_0000_0000_0000 +-------------+     :             :   <- _PAGE_END(52)
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :  unusable   :     |             |
>                         :             :     |   linear    |
>                         :     by      :     |             |
>                         :             :     |   region    |
>                         :  hardware   :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>                         :             :     |             |
>   0xfff0_0000_0000_0000 +-------------+     +-------------+   <- PAGE_OFFSET
>
> As illustrated above, the 52-bit VA kernel uses 47 bits for the vmalloc
> space (as before), to ensure that a single 64k granule kernel image can
> support any 64k granule capable system, regardless of whether it supports
> the 52-bit virtual addressing extension. However, because the VA space
> is still split into equal halves, the linear region is only 2^51 bytes
> in size, wasting almost half of the 52-bit VA space.
>
> Let's fix this by abandoning the equal split, and simply assigning all
> VA space outside of the vmalloc region to the linear region.
>
> The KASAN shadow region is reconfigured so that it ends at the start of
> the vmalloc region and grows downwards. That way, the arrangement of the
> vmalloc space (which contains kernel mappings, modules, the BPF region,
> the vmemmap array, etc.) is identical between non-KASAN and KASAN builds,
> which aids debugging.
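
The "identical arrangement" follows from the choice of offsets below:
KASAN maps each 2^KASAN_SHADOW_SCALE_SHIFT bytes of VA space to one
shadow byte via shadow = (addr >> scale) + offset, so KASAN_SHADOW_END
is (1 << (64 - scale)) + offset, and the new offsets make that evaluate
to exactly _PAGE_END(VA_BITS_MIN), the start of the vmalloc region.
Minimal sketch (user space C, illustrative only):

  #include <stdio.h>

  int main(void)
  {
          int scale = 3;                                         /* generic KASAN; 4 for SW_TAGS */
          unsigned long long offset = 0xdfff800000000000ULL;     /* new 48/52-bit VA value */
          unsigned long long shadow_end = (1ULL << (64 - scale)) + offset;
          unsigned long long page_end_48 = -(1ULL << (48 - 1));  /* _PAGE_END(48) */

          printf("%d\n", shadow_end == page_end_48);             /* prints 1 */
          return 0;
  }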
>
> Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
> ---
>  Documentation/arm64/kasan-offsets.sh |  3 +--
>  arch/arm64/Kconfig                   | 20 ++++++++++----------
>  arch/arm64/include/asm/memory.h      |  8 ++++----
>  arch/arm64/mm/init.c                 |  2 +-
>  4 files changed, 16 insertions(+), 17 deletions(-)
>
> diff --git a/Documentation/arm64/kasan-offsets.sh b/Documentation/arm64/kasan-offsets.sh
> index 2b7a021db363..2dc5f9e18039 100644
> --- a/Documentation/arm64/kasan-offsets.sh
> +++ b/Documentation/arm64/kasan-offsets.sh
> @@ -1,12 +1,11 @@
> #!/bin/sh
>
> # Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW
> -# start address at the mid-point of the kernel VA space
> +# start address at the top of the linear region
>
>  print_kasan_offset () {
>  	printf "%02d\t" $1
>  	printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \
> -			+ (1 << ($1 - 32 - $2)) \
>  			- (1 << (64 - 32 - $2)) ))
>  }
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 6d232837cbee..896a46a71d23 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -321,16 +321,16 @@ config BROKEN_GAS_INST
> config KASAN_SHADOW_OFFSET
> hex
> depends on KASAN
> - default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
> - default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
> - default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
> - default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
> - default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
> - default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
> - default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
> - default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
> - default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
> - default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
> + default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
> + default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
> + default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
> + default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
> + default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
> + default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
> + default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
> + default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
> + default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
> + default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
> default 0xffffffffffffffff
>
> source "arch/arm64/Kconfig.platforms"
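
For reference, all of the new defaults above follow from pinning
KASAN_SHADOW_END at _PAGE_END(va_bits_min), i.e.,
offset = _PAGE_END(va_bits_min) - (1 << (64 - scale)). An illustrative
check (user space C, not part of the patch) that reproduces the table:

  #include <stdio.h>

  int main(void)
  {
          int va[] = { 48, 47, 42, 39, 36 };      /* 52-bit VA shares the 48-bit value */

          for (int scale = 3; scale <= 4; scale++)        /* 3: generic KASAN, 4: SW_TAGS */
                  for (int i = 0; i < 5; i++) {
                          unsigned long long end = -(1ULL << (va[i] - 1)); /* _PAGE_END(va) */

                          printf("VA_BITS %2d, scale %d: 0x%llx\n",
                                 va[i], scale, end - (1ULL << (64 - scale)));
                  }
          return 0;
  }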
> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> index 1ded73189874..a9bb750b3dac 100644
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -44,7 +44,7 @@
> #define _PAGE_OFFSET(va) (-(UL(1) << (va)))
> #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
> #define KIMAGE_VADDR (MODULES_END)
> -#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
> +#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN))
> #define BPF_JIT_REGION_SIZE (SZ_128M)
> #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
> #define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
> @@ -76,10 +76,11 @@
>  #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
>  #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
>  					+ KASAN_SHADOW_OFFSET)
> +#define PAGE_END		(KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
>  #define KASAN_THREAD_SHIFT	1
>  #else
>  #define KASAN_THREAD_SHIFT	0
> -#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
> +#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
>  #endif /* CONFIG_KASAN */
>
> #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
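
The PAGE_END definition in the KASAN case falls out of the same math:
the shadow of the entire 2^vabits_actual kernel VA space occupies
2^(vabits_actual - KASAN_SHADOW_SCALE_SHIFT) bytes ending at
KASAN_SHADOW_END, so the linear region has to stop where that shadow
starts. Sketch (user space C, illustrative only):

  #include <stdio.h>

  int main(void)
  {
          int vabits = 48, scale = 3;                     /* generic KASAN, 48-bit VA */
          unsigned long long shadow_end = -(1ULL << (vabits - 1)); /* == _PAGE_END(48) */
          unsigned long long page_end = shadow_end - (1ULL << (vabits - scale));

          printf("PAGE_END = 0x%llx\n", page_end);        /* 0xffff600000000000 */
          return 0;
  }

(This is also why BPF_JIT_REGION_START switches to _PAGE_END(VA_BITS_MIN)
above: KASAN_SHADOW_END is no longer defined in !KASAN builds, but with
the new offsets the two evaluate to the same address when KASAN is
enabled.)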
> @@ -162,7 +163,6 @@
> #include <asm/bug.h>
>
> extern u64 vabits_actual;
> -#define PAGE_END (_PAGE_END(vabits_actual))
>
> extern s64 memstart_addr;
> /* PHYS_OFFSET - the physical address of the start of memory. */
> @@ -237,7 +237,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
> * space. Testing the top bit for the start of the region is a
> * sufficient check and avoids having to worry about the tag.
> */
> -#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
> +#define __is_lm_address(addr) ((untagged_addr(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
This shouldn't be using untagged_addr(), but just a (u64) cast for addr.
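
I.e., presumably something along these lines for the next revision
(sketch):

  #define __is_lm_address(addr)	(((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

The & ~PAGE_OFFSET already strips the top bits, tag byte included, so
the untagged_addr() is redundant here.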
>
> #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
> #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 324f0e0894f6..9090779dd3cd 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -270,7 +270,7 @@ static void __init fdt_enforce_memory_region(void)
>
> void __init arm64_memblock_init(void)
> {
> - const s64 linear_region_size = BIT(vabits_actual - 1);
> + const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
>
> /* Handle linux,usable-memory-range property */
> fdt_enforce_memory_region();
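
Sanity check on the arithmetic: in a !KASAN, 48-bit VA build the new
expression reduces to the old BIT(vabits_actual - 1), and for 52-bit VA
it yields the full extended region (user space C, illustrative only):

  #include <stdio.h>

  int main(void)
  {
          unsigned long long page_end = -(1ULL << 47);    /* _PAGE_END(48), !KASAN */

          /* vabits_actual == 48: same as the old BIT(47) */
          printf("%d\n", page_end + (1ULL << 48) == (1ULL << 47));  /* prints 1 */
          /* vabits_actual == 52: 2^52 - 2^47 bytes */
          printf("0x%llx\n", page_end + (1ULL << 52));    /* 0xf800000000000 */
          return 0;
  }

(PAGE_END - _PAGE_OFFSET(v) is PAGE_END + (1 << v) in two's complement,
hence the additions above.)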
> --
> 2.17.1
>