[PATCH 6/9] arm64: alternatives: have callbacks take a cap

Joey Gouly joey.gouly at arm.com
Fri Sep 2 08:54:45 PDT 2022


Hi Mark,

On Thu, Sep 01, 2022 at 04:14:00PM +0100, Mark Rutland wrote:
> Today, callback alternatives are special-cased within
> __apply_alternatives(), and are applied alongside patching for system
> capabilities as ARM64_NCAPS is not part of the boot_capabilities feature
> mask.
> 
> This special-casing is less than ideal. Giving special meaning to
> ARM64_NCAPS for this requires some structures and loops to use
> ARM64_NCAPS + 1 (AKA ARM64_NPATCHABLE), while others use ARM64_NCAPS.
> It's also not immediately clear that callback alternatives are only applied
> when applying alternatives for system-wide features.
> 
> To make this a bit clearer, this patch changes the way that callback
> alternatives are identified, removing the special-casing of ARM64_NCAPS
> and allowing callback alternatives to be associated with a cpucap as
> with all other alternatives.
> 
> New cpucaps, ARM64_ALWAYS_BOOT and ARM64_ALWAYS_SYSTEM, are added which
> are always detected alongside boot cpu capabilities and system
> capabilities respectively. All existing callback alternatives are made
> to use ARM64_ALWAYS_SYSTEM, and so will be patched at the same point
> during the boot flow as before.
> 
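If I'm reading this right, these end up as always-true entries in the
arm64_features[] table in cpufeature.c; that hunk isn't quoted below, so
this is just a sketch of what I'd expect (helper name guessed):

	static bool has_always(const struct arm64_cpu_capabilities *entry,
			       int scope)
	{
		return true;
	}

	{
		.capability = ARM64_ALWAYS_BOOT,
		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
		.matches = has_always,
	},
	{
		.capability = ARM64_ALWAYS_SYSTEM,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_always,
	},

i.e. ARM64_ALWAYS_BOOT gets set alongside the boot CPU capabilities and
ARM64_ALWAYS_SYSTEM once system capabilities are finalized, matching the
patching points described above.
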
> Subsequent patches will make more use of these new cpucaps.
> 
> There should be no functional change as a result of this patch.
> 
> Signed-off-by: Mark Rutland <mark.rutland at arm.com>
> Cc: Ard Biesheuvel <ardb at kernel.org>
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: James Morse <james.morse at arm.com>
> Cc: Joey Gouly <joey.gouly at arm.com>
> Cc: Marc Zyngier <maz at kernel.org>
> Cc: Will Deacon <will at kernel.org>
> ---
>  arch/arm64/include/asm/alternative-macros.h | 18 ++++++++-----
>  arch/arm64/include/asm/assembler.h          | 10 ++++----
>  arch/arm64/include/asm/cpufeature.h         |  4 +--
>  arch/arm64/include/asm/kvm_mmu.h            |  5 ++--
>  arch/arm64/kernel/alternative.c             | 28 ++++++++++++---------
>  arch/arm64/kernel/cpufeature.c              | 19 ++++++++++++--
>  arch/arm64/kernel/entry.S                   |  8 +++---
>  arch/arm64/kvm/hyp/hyp-entry.S              |  4 +--
>  arch/arm64/tools/cpucaps                    |  2 ++
>  9 files changed, 62 insertions(+), 36 deletions(-)
> 

[..]

> diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
> index 2e18c9c0f612b..da706c9f9a9a5 100644
> --- a/arch/arm64/kernel/alternative.c
> +++ b/arch/arm64/kernel/alternative.c
> @@ -21,6 +21,11 @@
>  #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
>  #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
>  
> +#define ALT_CAP(a)		((a)->cpufeature & ~ARM64_CB_BIT)
> +#define ALT_HAS_CB(a)		((a)->cpufeature & ARM64_CB_BIT)
> +
> +#define ALT_NR_INST(a)		((a)->orig_len / AARCH64_INSN_SIZE)
You introduced this macro, but don't use it.
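Presumably the intention was to use it in place of the open-coded
division in the hunk below, i.e. something like (just a guess):

	nr_inst = ALT_NR_INST(alt);

rather than dividing alt->orig_len by AARCH64_INSN_SIZE by hand.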

> +
>  /* Volatile, as we may be patching the guts of READ_ONCE() */
>  static volatile int all_alternatives_applied;
>  
> @@ -143,16 +148,15 @@ static void __nocfi __apply_alternatives(const struct alt_region *region,
>  
>  	for (alt = region->begin; alt < region->end; alt++) {
>  		int nr_inst;
> +		int cap = ALT_CAP(alt);
>  
> -		if (!test_bit(alt->cpufeature, feature_mask))
> +		if (!test_bit(cap, feature_mask))
>  			continue;
>  
> -		/* Use ARM64_CB_PATCH as an unconditional patch */
> -		if (alt->cpufeature < ARM64_CB_PATCH &&
> -		    !cpus_have_cap(alt->cpufeature))
> +		if (!cpus_have_cap(cap))
>  			continue;
>  
> -		if (alt->cpufeature == ARM64_CB_PATCH)
> +		if (ALT_HAS_CB(alt))
>  			BUG_ON(alt->alt_len != 0);
>  		else
>  			BUG_ON(alt->alt_len != alt->orig_len);
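
Just to confirm my understanding of the new encoding: for a callback
alternative, ->cpufeature now carries both the callback flag and a real
cap, roughly:

	/* e.g. an existing callback alternative after this patch: */
	alt->cpufeature == (ARM64_CB_BIT | ARM64_ALWAYS_SYSTEM)

	ALT_CAP(alt)    /* -> ARM64_ALWAYS_SYSTEM */
	ALT_HAS_CB(alt) /* -> non-zero, so take the callback path */

which is why the test_bit()/cpus_have_cap() checks above can now be
applied unconditionally and the old `cpufeature < ARM64_CB_PATCH`
special cases go away.
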
> @@ -161,10 +165,10 @@ static void __nocfi __apply_alternatives(const struct alt_region *region,
>  		updptr = is_module ? origptr : lm_alias(origptr);
>  		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
>  
> -		if (alt->cpufeature < ARM64_CB_PATCH)
> -			alt_cb = patch_alternative;
> -		else
> +		if (ALT_HAS_CB(alt))
>  			alt_cb  = ALT_REPL_PTR(alt);
> +		else
> +			alt_cb = patch_alternative;
>  
>  		alt_cb(alt, origptr, updptr, nr_inst);
>  
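(For reference, alt_cb here is the usual alternative_cb_t callback, so
the default patch_alternative and the per-site callbacks share the same
prototype -- if I remember it correctly:

	typedef void (*alternative_cb_t)(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr,
					 int nr_inst);

which is why the single alt_cb() call above can invoke either one
interchangeably.)
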
> @@ -208,10 +212,10 @@ static int __apply_alternatives_multi_stop(void *unused)
>  			cpu_relax();
>  		isb();
>  	} else {
> -		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);
> +		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);
>  
>  		bitmap_complement(remaining_capabilities, boot_capabilities,
> -				  ARM64_NPATCHABLE);
> +				  ARM64_NCAPS);
>  
>  		BUG_ON(all_alternatives_applied);
>  		__apply_alternatives(&kernel_alternatives, false,
> @@ -254,9 +258,9 @@ void apply_alternatives_module(void *start, size_t length)
>  		.begin	= start,
>  		.end	= start + length,
>  	};
> -	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);
> +	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);
>  
> -	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);
> +	bitmap_fill(all_capabilities, ARM64_NCAPS);
>  
>  	__apply_alternatives(&region, true, &all_capabilities[0]);
>  }

Thanks,
Joey


