[PATCH v1 2/3] arm64: alternative: patch alternatives in the vDSO

Mark Rutland mark.rutland at arm.com
Thu Aug 25 06:19:32 PDT 2022


On Thu, Aug 25, 2022 at 11:20:24AM +0100, Joey Gouly wrote:
> Make it possible to use alternatives in the vDSO, so that better
> implementations can be used if possible.
> 
> Signed-off-by: Joey Gouly <joey.gouly at arm.com>
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: Will Deacon <will at kernel.org>
> Cc: Vincenzo Frascino <vincenzo.frascino at arm.com>
> Cc: Mark Rutland <mark.rutland at arm.com>
> ---
>  arch/arm64/include/asm/vdso.h     |  3 +++
>  arch/arm64/kernel/alternative.c   | 25 +++++++++++++++++++++++++
>  arch/arm64/kernel/vdso.c          |  3 ---
>  arch/arm64/kernel/vdso/vdso.lds.S |  7 +++++++
>  4 files changed, 35 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
> index f99dcb94b438..b4ae32109932 100644
> --- a/arch/arm64/include/asm/vdso.h
> +++ b/arch/arm64/include/asm/vdso.h
> @@ -26,6 +26,9 @@
>  	(void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
>  })
>  
> +extern char vdso_start[], vdso_end[];
> +extern char vdso32_start[], vdso32_end[];
> +
>  #endif /* !__ASSEMBLY__ */
>  
>  #endif /* __ASM_VDSO_H */
> diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
> index 9bcaa5eacf16..a4036194a5cd 100644
> --- a/arch/arm64/kernel/alternative.c
> +++ b/arch/arm64/kernel/alternative.c
> @@ -10,11 +10,14 @@
>  
>  #include <linux/init.h>
>  #include <linux/cpu.h>
> +#include <linux/elf.h>
>  #include <asm/cacheflush.h>
>  #include <asm/alternative.h>
>  #include <asm/cpufeature.h>
>  #include <asm/insn.h>
> +#include <asm/module.h>
>  #include <asm/sections.h>
> +#include <asm/vdso.h>
>  #include <linux/stop_machine.h>
>  
>  #define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
> @@ -192,6 +195,27 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
>  	}
>  }
>  
> +void apply_alternatives_vdso(unsigned long *feature_mask)
> +{
> +	struct alt_region region;
> +	const struct elf64_hdr *hdr;
> +	const struct elf64_shdr *shdr;
> +	const struct elf64_shdr *alt;
> +
> +	hdr = (struct elf64_hdr *)vdso_start;
> +	shdr = (void *)hdr + hdr->e_shoff;
> +	alt = find_section(hdr, shdr, ".altinstructions");
> +	if (!alt)
> +		return;
> +
> +	region = (struct alt_region){
> +		.begin	= (void *)hdr + alt->sh_offset,
> +		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
> +	};
> +
> +	__apply_alternatives(&region, false, feature_mask);
> +}
> +
>  /*
>   * We might be patching the stop_machine state machine, so implement a
>   * really simple polling protocol here.
> @@ -216,6 +240,7 @@ static int __apply_alternatives_multi_stop(void *unused)
>  
>  		BUG_ON(all_alternatives_applied);
>  		__apply_alternatives(&region, false, remaining_capabilities);
> +		apply_alternatives_vdso(remaining_capabilities);

Since we don't patch the vDSO in apply_boot_alternatives(), using
`remaining_capabilities` here means that we could, in theory, miss alternatives
for features which were detected on the boot CPU.
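
For reference, the mask handling in __apply_alternatives_multi_stop() is
roughly the below (paraphrasing alternative.c rather than quoting it exactly):

	DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

	/* only the capabilities *not* already applied by apply_boot_alternatives() */
	bitmap_complement(remaining_capabilities, boot_capabilities,
			  ARM64_NPATCHABLE);

	__apply_alternatives(&region, false, remaining_capabilities);

... so anything already handled at boot time is deliberately skipped here, and
the vDSO never got that first pass.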

Since the VDSO cannot be concurrently executed within the kernel, we could
hoist the call to apply_alternatives_vdso() out of
__apply_alternatives_multi_stop(), and call it before the stop_machine in
apply_alternatives_all().
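
i.e. something like the below in apply_alternatives_all() (just a sketch, with
the feature_mask argument dropped as in the version below):

	void __init apply_alternatives_all(void)
	{
		/*
		 * The vDSO is not executed within the kernel, so it can be
		 * patched before the stop_machine() dance, once all system
		 * capabilities have been finalized.
		 */
		apply_alternatives_vdso();

		/* better not try code patching on a live SMP system */
		stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
	}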

That would keep __apply_alternatives_multi_stop() simple (and easier to make
noinstr-safe), and we could use the same mask logic as
apply_alternatives_module(), e.g.

void apply_alternatives_vdso(void)
{
	struct alt_region region;
	const struct elf64_hdr *hdr;
	const struct elf64_shdr *shdr;
	const struct elf64_shdr *alt;
	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

	/* all system capabilities are known by the time this is called */
	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;

	region = (struct alt_region){
		.begin	= (void *)hdr + alt->sh_offset,
		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
	};

	__apply_alternatives(&region, false, &all_capabilities[0]);
}

... does that sound ok to you?

Thanks,
Mark.

>  		/* Barriers provided by the cache flushing */
>  		all_alternatives_applied = 1;
>  	}
> diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
> index a61fc4f989b3..ac93a2ee9c07 100644
> --- a/arch/arm64/kernel/vdso.c
> +++ b/arch/arm64/kernel/vdso.c
> @@ -29,9 +29,6 @@
>  #include <asm/signal32.h>
>  #include <asm/vdso.h>
>  
> -extern char vdso_start[], vdso_end[];
> -extern char vdso32_start[], vdso32_end[];
> -
>  enum vdso_abi {
>  	VDSO_ABI_AA64,
>  	VDSO_ABI_AA32,
> diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
> index e69fb4aaaf3e..6028f1fe2d1c 100644
> --- a/arch/arm64/kernel/vdso/vdso.lds.S
> +++ b/arch/arm64/kernel/vdso/vdso.lds.S
> @@ -48,6 +48,13 @@ SECTIONS
>  	PROVIDE (_etext = .);
>  	PROVIDE (etext = .);
>  
> +	. = ALIGN(4);
> +	.altinstructions : {
> +		__alt_instructions = .;
> +		*(.altinstructions)
> +		__alt_instructions_end = .;
> +	}
> +
>  	.dynamic	: { *(.dynamic) }		:text	:dynamic
>  
>  	.rela.dyn	: ALIGN(8) { *(.rela .rela*) }
> -- 
> 2.17.1
> 


