[PATCH v6 03/27] arm64/sysreg: Add LINKER_SCRIPT guards for sysreg.h
Will Deacon
will@kernel.org
Mon Jul 4 06:03:48 PDT 2022
On Wed, Jun 29, 2022 at 11:28:10AM +0100, Mark Brown wrote:
> In subsequent patches sysreg.h and gpr-num.h will be included from other
> headers which can be included from linker scripts (e.g. via cache.h), and
> contain assembler macro directives that don't work in linker scripts (e.g.
> `.macro` and `.irp`).
>
> To avoid needing LINKER_SCRIPT guards in every file which could have a
> transitive dependency, add those directly to sysreg.h and gpr-num.h.
>
> Signed-off-by: Mark Brown <broonie at kernel.org>
> ---
> arch/arm64/include/asm/gpr-num.h | 3 +++
> arch/arm64/include/asm/sysreg.h | 2 ++
> 2 files changed, 5 insertions(+)
>
> diff --git a/arch/arm64/include/asm/gpr-num.h b/arch/arm64/include/asm/gpr-num.h
> index 05da4a7c5788..72c44f63fb41 100644
> --- a/arch/arm64/include/asm/gpr-num.h
> +++ b/arch/arm64/include/asm/gpr-num.h
> @@ -4,12 +4,15 @@
>
> #ifdef __ASSEMBLY__
>
> +#ifndef LINKER_SCRIPT
> +
> .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
> .equ .L__gpr_num_x\num, \num
> .equ .L__gpr_num_w\num, \num
> .endr
> .equ .L__gpr_num_xzr, 31
> .equ .L__gpr_num_wzr, 31
> +#endif /* !LINKER_SCRIPT */
>
> #else /* __ASSEMBLY__ */
>
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 42ff95dba6da..6c7be1a2114b 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -1219,6 +1219,7 @@
>
> #ifdef __ASSEMBLY__
>
> +#ifndef LINKER_SCRIPT
> .macro mrs_s, rt, sreg
> __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
> .endm
> @@ -1226,6 +1227,7 @@
> .macro msr_s, sreg, rt
> __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
> .endm
> +#endif /* !LINKER_SCRIPT */
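For reference, the reason linker scripts see this at all is that the kernel's
*.lds.S files are run through the preprocessor with both __ASSEMBLY__ and
LINKER_SCRIPT defined (the cpp_lds_S rule), so whatever a header emits under
__ASSEMBLY__ lands in the preprocessed linker script unless it is guarded
further. A rough sketch, using a made-up example.lds.S rather than any real
file:

/* example.lds.S (hypothetical), preprocessed with -D__ASSEMBLY__ -DLINKER_SCRIPT */
#include <asm/cache.h>	/* only wanted for L1_CACHE_BYTES...		*/
			/* ...but, before the rearrangement below, it	*/
			/* also pulls in <asm/sysreg.h>, whose		*/
			/* __ASSEMBLY__ block emits `.macro mrs_s` etc.,*/
			/* which ld cannot parse in a linker script.	*/

SECTIONS
{
	.data : ALIGN(L1_CACHE_BYTES) {
		*(.data)
	}
}
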
Hmm, this feels quite fragile to me: we have quite a few asm/ headers
using the '.macro' directive, and having to add LINKER_SCRIPT guards
because of transitive includes is going to catch people out.
With your series applied on a temporary branch, it looks like the only
issue is the use of L1_CACHE_BYTES in hyp.lds.S, and I think this is
because asm/cache.h is wrongly pulling things into the __ASSEMBLY__ block.
The diff below fixes things for me and means we can drop the guards in
this patch. What do you think?
Will
--->8
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 3abedf304ec1..8bedb981439a 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -5,16 +5,10 @@
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H
-#include <asm/cputype.h>
-#include <asm/mte-def.h>
-#include <asm/sysreg.h>
-
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_EL0_DMINLINE_SHIFT | \
CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
-#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
-
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
@@ -40,6 +34,10 @@
#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
+#include <asm/cputype.h>
+#include <asm/mte-def.h>
+#include <asm/sysreg.h>
+
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#elif defined(CONFIG_KASAN_HW_TAGS)
@@ -51,6 +49,8 @@ static inline unsigned int arch_slab_minalign(void)
#define arch_slab_minalign() arch_slab_minalign()
#endif
+#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
+
#define ICACHEF_ALIASING 0
#define ICACHEF_VPIPT 1
extern unsigned long __icache_flags;
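To spell out the effect: after this rearrangement the part of cache.h visible
to assembly and linker scripts carries plain #defines only, and everything
that drags in sysreg.h (and hence its '.macro' definitions) sits under
#ifndef __ASSEMBLY__, where neither .S files nor preprocessed linker scripts
will see it. Roughly (an abridged sketch, not the verbatim header):

/* asm/cache.h after the rearrangement -- abridged sketch */
#ifndef __ASM_CACHE_H
#define __ASM_CACHE_H

/* Plain #defines only: usable from C, assembly and linker scripts. */
#define L1_CACHE_SHIFT		(6)
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/kasan-enabled.h>

#include <asm/cputype.h>
#include <asm/mte-def.h>
#include <asm/sysreg.h>	/* now only seen by C code, so its __ASSEMBLY__
			   block can never leak into a linker script */

#define CTR_L1IP(ctr)	SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)

/* ... C-only declarations ... */

#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_CACHE_H */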