[PATCH 6/6] arm64: module: rework module VA range selection
Ard Biesheuvel
ardb at kernel.org
Tue May 9 04:40:12 PDT 2023
Hi Mark,
Thanks for cleaning this up.
On Tue, 9 May 2023 at 13:15, Mark Rutland <mark.rutland at arm.com> wrote:
>
> Currently, the modules region is 128M in size, which is a problem for
> some large modules. Shanker reports [1] that the NVIDIA GPU driver alone
> can consume 110M of module space in some configurations. We'd like to
> make the modules region a full 2G such that we can always make use of a
> 2G range.
>
> It's possible to build kernel images which are larger than 128M in some
> configurations, such as when many debug options are selected and many
> drivers are built in. In these configurations, we can't legitimately
> select a base for a 128M module region, though we currently select a
> value for which allocation will fail. It would be nicer to have a
> diagnostic message in this case.
>
> Similarly, in theory it's possible to build a kernel image which is
> larger than 2G and which cannot support modules. While this isn't likely
> to be the case for any realistic kernel deployed in the field, it would
> be nice if we could print a diagnostic in this case.
>
> This patch reworks the module VA range selection to use a 2G range, and
> improves handling of cases where we cannot select legitimate module
> regions. We now attempt to select a 128M region and a 2G region:
>
> * The 128M region is selected such that modules can use direct branches
> (with JUMP26/CALL26 relocations) to branch to kernel code and other
> modules, and so that modules can use direct data references (with
> PREL32 relocations) to access data in the kernel image and other
> modules.
>
> This region covers the entire kernel image (rather than just the text)
> to ensure that all PREL32 relocations are in range even if the kernel
> data section is absurdly large. Where we cannot allocate from this
> region, we'll fall back to the full 2G region.
>
> * The 2G region is selected such that modules can use direct branches
> with PLTs to branch to kernel code and other modules, and so that
> modules can use direct data references (with PREL32 relocations) to
> access data in the kernel image and other modules.
>
> This region covers the entire kernel image, and the 128M region (if
> one is selected).
>
> The two module regions are randomized independently while ensuring the
> constraints described above.
>
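(Just to restate the scheme with concrete numbers, as I read it: for a
hypothetical 48M image, choose_random_bounding_box() puts the 128M window
at a page-aligned base somewhere in [_text - 80M, _text], and the 2G window
at a base somewhere in [module_direct_base - 1920M, module_direct_base], so
both windows always cover the whole image and the 128M window always sits
inside the 2G one.)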
> [1] https://lore.kernel.org/linux-arm-kernel/159ceeab-09af-3174-5058-445bc8dcf85b@nvidia.com/
>
> Signed-off-by: Mark Rutland <mark.rutland at arm.com>
> Cc: Ard Biesheuvel <ardb at kernel.org>
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: Shanker Donthineni <sdonthineni at nvidia.com>
> Cc: Will Deacon <will at kernel.org>
> ---
> Documentation/arm64/memory.rst | 8 +-
> arch/arm64/include/asm/memory.h | 2 +-
> arch/arm64/kernel/module.c | 143 ++++++++++++++++++++++----------
> 3 files changed, 104 insertions(+), 49 deletions(-)
>
> diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
> index 2a641ba7be3b7..55a55f30eed8a 100644
> --- a/Documentation/arm64/memory.rst
> +++ b/Documentation/arm64/memory.rst
> @@ -33,8 +33,8 @@ AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
> 0000000000000000 0000ffffffffffff 256TB user
> ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map
> [ffff600000000000 ffff7fffffffffff] 32TB [kasan shadow region]
> - ffff800000000000 ffff800007ffffff 128MB modules
> - ffff800008000000 fffffbffefffffff 124TB vmalloc
> + ffff800000000000 ffff80007fffffff 2GB modules
> + ffff800080000000 fffffbffefffffff 124TB vmalloc
> fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
> fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
> fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
> @@ -50,8 +50,8 @@ AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support):
> 0000000000000000 000fffffffffffff 4PB user
> fff0000000000000 ffff7fffffffffff ~4PB kernel logical memory map
> [fffd800000000000 ffff7fffffffffff] 512TB [kasan shadow region]
> - ffff800000000000 ffff800007ffffff 128MB modules
> - ffff800008000000 fffffbffefffffff 124TB vmalloc
> + ffff800000000000 ffff80007fffffff 2GB modules
> + ffff800080000000 fffffbffefffffff 124TB vmalloc
> fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
> fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
> fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> index 215efc3bbbcf9..6e0e5722f229e 100644
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -46,7 +46,7 @@
> #define KIMAGE_VADDR (MODULES_END)
> #define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
> #define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
> -#define MODULES_VSIZE (SZ_128M)
> +#define MODULES_VSIZE (SZ_2G)
> #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
> #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
> #define PCI_IO_END (VMEMMAP_START - SZ_8M)
> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> index f64636c2fd054..c00511fa3b686 100644
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -24,72 +24,127 @@
> #include <asm/scs.h>
> #include <asm/sections.h>
>
> -static u64 __ro_after_init module_alloc_base = (u64)_etext - MODULES_VSIZE;
> +static u64 module_direct_base __ro_after_init = 0;
> +static u64 module_plt_base __ro_after_init = 0;
>
> -#ifdef CONFIG_RANDOMIZE_BASE
> -static int __init kaslr_module_init(void)
> +/*
> + * Choose a random page-aligned base address for a window of 'size' bytes which
> + * entirely contains the interval [start, end - 1].
> + */
> +static u64 __init choose_random_bounding_box(u64 size, u64 start, u64 end)
> {
> - u64 module_range;
> - u32 seed;
> + u64 max_pgoff, pgoff;
>
> - if (!kaslr_enabled())
> + if (WARN_ON((end - start) >= size))
> return 0;
>
> - seed = get_random_u32();
> + max_pgoff = (size - (end - start)) / PAGE_SIZE;
> + pgoff = get_random_u32_inclusive(0, max_pgoff);
> +
> + return start - pgoff * PAGE_SIZE;
> +}
> +
> +/*
> + * Modules may directly reference data anywhere within the kernel image and
> + * other modules. These data references will use PREL32 relocations with a
> + * +/-2G range, and so we need to ensure that the entire kernel image and all
> + * modules fall within a 2G window such that these are always within range.
> + *
'Data references' is slightly inaccurate here - data references from
code use ADRP/LDR with a +/-4G range, whereas the PREL32 references
in question are references *from* data to both data and code symbols.
The conclusion is the same of course: PREL32 has the smaller range
and needs to cover the entire kernel image, including code symbols
living in .text.
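
To make the distinction concrete, a PREL32 reference is just a
place-relative 32-bit offset stored in a data entry (value = target - place),
roughly like the illustrative sketch below (not kernel code), so the +/-2G
limit applies between the entry itself and whatever it refers to, code or
data:

  #include <stdint.h>

  /* Illustrative only: a place-relative 32-bit reference, as PREL32 encodes */
  struct prel32_ref {
          int32_t offset;         /* target - &offset; must fit in 32 bits */
  };

  static inline void *prel32_target(const struct prel32_ref *ref)
  {
          return (void *)((intptr_t)&ref->offset + ref->offset);
  }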
> + * Modules may directly branch to functions and code within the kernel text,
> + * and to functions and code within other modules. These branches will use
> + * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
> + * that the entire kernel text and all module text falls within a 128M window
> + * such that these are always within range. With PLTs, we can expand this to a
> + * 2G window.
> + *
> + * We chose the 128M region to surround the entire kernel image (rather than
> + * just the text) as using the same bounds for the 128M and 2G regions ensures
> + * by construction that we never select a 128M region that is not a subset of
> + * the 2G region. For very large and unusual kernel configurations this means
> + * we may fall back to PLTs where they could have been avoided, but this keeps
> + * the logic significantly simpler.
> + */
> +static int __init module_init_limits(void)
> +{
> + u64 kernel_end = (u64)_end;
> + u64 kernel_start = (u64)_text;
> + u64 kernel_size = kernel_end - kernel_start;
> +
> + /*
> + * The default modules region is placed immediately below the kernel
> + * image, and is large enough to use the full 2G relocation range.
> + */
> + BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
> + BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
> +
> + if (kernel_size >= SZ_2G) {
> + pr_warn("Kernel is too large to support modules (%llu bytes)\n",
> + kernel_size);
> + return 0;
> + }
>
> if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
> - /*
> - * Randomize the module region over a 2 GB window covering the
> - * kernel. This reduces the risk of modules leaking information
> - * about the address of the kernel itself, but results in
> - * branches between modules and the core kernel that are
> - * resolved via PLTs. (Branches between modules will be
> - * resolved normally.)
> - */
> - module_range = SZ_2G - (u64)(_end - _stext);
> - module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
> + pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
> + } else if (kernel_size >= SZ_128M) {
I suppose this bound is somewhat arbitrary? I mean, if kernel_size
were SZ_128M-SZ_4K, we'd have the exact same problem, and end up using
the 2G region all the same, just with a different diagnostic message?
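E.g. with kernel_size == SZ_128M - SZ_4K we'd still take the
choose_random_bounding_box() path, but the 128M window would cover the
image with only 4K of VA to spare, so practically every allocation would
fall through to the 2G window anyway - just without the pr_info.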
> + pr_info("2G module region forced by kernel size (%llu bytes)\n",
> + kernel_size);
> + } else if (IS_ENABLED(CONFIG_RANOMIZE_BASE)) {
Typo here ^^^
> + module_direct_base = choose_random_bounding_box(SZ_128M,
> + kernel_start,
> + kernel_end);
> } else {
> - /*
> - * Randomize the module region by setting module_alloc_base to
> - * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
> - * _stext) . This guarantees that the resulting region still
> - * covers [_stext, _etext], and that all relative branches can
> - * be resolved without veneers unless this region is exhausted
> - * and we fall back to a larger 2GB window in module_alloc()
> - * when ARM64_MODULE_PLTS is enabled.
> - */
> - module_range = MODULES_VSIZE - (u64)(_etext - _stext);
> + module_direct_base = kernel_end - SZ_128M;
> }
>
> - /* use the lower 21 bits to randomize the base of the module region */
> - module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
> - module_alloc_base &= PAGE_MASK;
> + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
> + u64 min, max;
> +
> + if (module_direct_base) {
> + min = module_direct_base;
> + max = module_direct_base + SZ_128M;
> + } else {
> + min = kernel_start;
> + max = kernel_end;
> + }
> +
> + module_plt_base = choose_random_bounding_box(SZ_2G, min, max);
> + } else {
> + module_plt_base = kernel_end - SZ_2G;
> + }
>
> return 0;
> }
> -subsys_initcall(kaslr_module_init)
> -#endif
> +subsys_initcall(module_init_limits);
>
> void *module_alloc(unsigned long size)
> {
> - u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
> - void *p;
> + void *p = NULL;
>
> /*
> * Where possible, prefer to allocate within direct branch range of the
> - * kernel such that no PLTs are necessary. This may fail, so we pass
> - * __GFP_NOWARN to silence the resulting warning.
> + * kernel such that no PLTs are necessary.
> */
> - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> - module_alloc_end, GFP_KERNEL | __GFP_NOWARN,
> - PAGE_KERNEL, 0, NUMA_NO_NODE,
> - __builtin_return_address(0));
> + if (module_direct_base) {
> + p = __vmalloc_node_range(size, MODULE_ALIGN,
> + module_direct_base,
> + module_direct_base + SZ_128M,
> + GFP_KERNEL | __GFP_NOWARN,
> + PAGE_KERNEL, 0, NUMA_NO_NODE,
> + __builtin_return_address(0));
> + }
> +
> + if (!p && module_plt_base) {
> + p = __vmalloc_node_range(size, MODULE_ALIGN,
> + module_plt_base,
> + module_plt_base + SZ_2G,
> + GFP_KERNEL | __GFP_NOWARN,
> + PAGE_KERNEL, 0, NUMA_NO_NODE,
> + __builtin_return_address(0));
> + }
>
> if (!p) {
> - p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> - module_alloc_base + SZ_2G, GFP_KERNEL,
> - PAGE_KERNEL, 0, NUMA_NO_NODE,
> - __builtin_return_address(0));
> + pr_warn_ratelimited("%s: unable to allocate memory\n",
> + __func__);
> }
>
> if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
> --
> 2.30.2
>