[PATCH 10/18] arm64: Introduce FIQ support

Marc Zyngier maz at kernel.org
Sat Feb 6 10:37:52 EST 2021


On Thu, 04 Feb 2021 20:39:43 +0000,
Hector Martin <marcan at marcan.st> wrote:
> 
> Apple SoCs (A11 and newer) have some interrupt sources hardwired to the
> FIQ line. Implement support for this by simply treating IRQs and FIQs
> the same way in the interrupt vectors. This is conditional on the
> ARM64_NEEDS_FIQ CPU feature flag, and thus will not affect other
> systems.
> 
> Root irqchip drivers can discriminate between IRQs and FIQs by checking
> the ISR_EL1 system register.
> 
> Signed-off-by: Hector Martin <marcan at marcan.st>
> ---
>  arch/arm64/include/asm/assembler.h |  4 ++++
>  arch/arm64/include/asm/daifflags.h |  7 +++++++
>  arch/arm64/include/asm/irqflags.h  | 17 +++++++++++++----
>  arch/arm64/kernel/entry.S          | 27 +++++++++++++++++++++++----
>  4 files changed, 47 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index bf125c591116..6acfc372dc76 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -42,7 +42,11 @@
>  
>  	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
>  	.macro enable_da_f
> +alternative_if ARM64_NEEDS_FIQ
> +	msr	daifclr, #(8 | 4)
> +alternative_else
>  	msr	daifclr, #(8 | 4 | 1)
> +alternative_endif

See my digression in patch 8. I really wonder what the benefit is of
treating FIQ independently of IRQ; we might as well generalise this.
We could always panic on getting a FIQ on platforms that don't expect
one.

It'd be good to rope in the other interested parties (Mark for the
early entry code, James for RAS and SError handling).

>  	.endm
>  
>  /*
> diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
> index 1c26d7baa67f..228a6039c701 100644
> --- a/arch/arm64/include/asm/daifflags.h
> +++ b/arch/arm64/include/asm/daifflags.h
> @@ -112,6 +112,13 @@ static inline void local_daif_restore(unsigned long flags)
>  		 * So we don't need additional synchronization here.
>  		 */
>  		gic_write_pmr(pmr);
> +	} else if (system_uses_fiqs()) {
> +		/*
> +		 * On systems that use FIQs, disable FIQs if IRQs are disabled.
> +		 * This can happen if the DAIF_* flags at the top of this file
> +		 * are used to set DAIF directly.
> +		 */
> +		flags |= PSR_F_BIT;
>  	}
>  
>  	write_sysreg(flags, daif);
> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> index ff328e5bbb75..689c573c4b47 100644
> --- a/arch/arm64/include/asm/irqflags.h
> +++ b/arch/arm64/include/asm/irqflags.h
> @@ -19,8 +19,9 @@
>   * side effects for other flags. Keeping to this order makes it easier for
>   * entry.S to know which exceptions should be unmasked.
>   *
> - * FIQ is never expected, but we mask it when we disable debug exceptions, and
> - * unmask it at all other times.
> + * FIQ is never expected on most platforms, but we mask it when we disable
> + * debug exceptions, and unmask it at all other times. On platforms that
> + * require FIQs, it tracks IRQ masking.
>   */
>  
>  /*
> @@ -34,8 +35,14 @@ static inline void arch_local_irq_enable(void)
>  		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
>  	}
>  
> -	asm volatile(ALTERNATIVE(
> +	/*
> +	 * Yes, ALTERNATIVE() nests properly... only one of these should be
> +	 * active on any given platform.
> +	 */
> +	asm volatile(ALTERNATIVE(ALTERNATIVE(
>  		"msr	daifclr, #2		// arch_local_irq_enable",
> +		"msr	daifclr, #3		// arch_local_irq_enable",
> +		ARM64_NEEDS_FIQ),

Err... no. Please. It may be a cool hack, but that's an unmaintainable
one in the long run. If you *really* have to have a special case here,
consider using a callback instead, and generate the right instruction
directly.
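
As a rough sketch of what I mean (untested; patch_daif_mask() is a
made-up name, and this relies on the DAIF immediate living in the CRm
field, bits [11:8], of the MSR (immediate) encoding):

void patch_daif_mask(struct alt_instr *alt, __le32 *origptr,
		     __le32 *updptr, int nr_inst)
{
	u32 insn = le32_to_cpu(*origptr);

	BUG_ON(nr_inst != 1);

	/* Turn the immediate #2 (I) into #3 (I | F) */
	if (cpus_have_cap(ARM64_NEEDS_FIQ))
		insn |= BIT(8);

	*updptr = cpu_to_le32(insn);
}

with the inline asm becoming something like:

	asm volatile(ALTERNATIVE_CB(
		"msr	daifclr, #2	// arch_local_irq_enable",
		patch_daif_mask)
		: : : "memory");

How that composes with the ARM64_HAS_IRQ_PRIO_MASKING alternative
still needs some thought, but at least the generated instruction is
explicit.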

>  		__msr_s(SYS_ICC_PMR_EL1, "%0"),
>  		ARM64_HAS_IRQ_PRIO_MASKING)
>  		:
> @@ -53,8 +60,10 @@ static inline void arch_local_irq_disable(void)
>  		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
>  	}
>  
> -	asm volatile(ALTERNATIVE(
> +	asm volatile(ALTERNATIVE(ALTERNATIVE(
>  		"msr	daifset, #2		// arch_local_irq_disable",
> +		"msr	daifset, #3		// arch_local_irq_disable",
> +		ARM64_NEEDS_FIQ),
>  		__msr_s(SYS_ICC_PMR_EL1, "%0"),
>  		ARM64_HAS_IRQ_PRIO_MASKING)
>  		:
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index c9bae73f2621..81ca04ebe37b 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -60,7 +60,7 @@
>  #define BAD_FIQ		2
>  #define BAD_ERROR	3
>  
> -	.macro kernel_ventry, el, label, regsize = 64
> +	.macro kernel_ventry, el, label, regsize = 64, altlabel = 0, alt = 0
>  	.align 7
>  #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
>  	.if	\el == 0
> @@ -87,7 +87,15 @@ alternative_else_nop_endif
>  	tbnz	x0, #THREAD_SHIFT, 0f
>  	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
>  	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
> +	.if	\altlabel != 0
> +	alternative_if \alt
> +	b	el\()\el\()_\altlabel
> +	alternative_else
>  	b	el\()\el\()_\label
> +	alternative_endif
> +	.else
> +	b	el\()\el\()_\label
> +	.endif
>  
>  0:
>  	/*
> @@ -119,7 +127,15 @@ alternative_else_nop_endif
>  	sub	sp, sp, x0
>  	mrs	x0, tpidrro_el0
>  #endif
> +	.if	\altlabel != 0
> +	alternative_if \alt
> +	b	el\()\el\()_\altlabel
> +	alternative_else
>  	b	el\()\el\()_\label
> +	alternative_endif
> +	.else
> +	b	el\()\el\()_\label
> +	.endif
>  	.endm
>  
>  	.macro tramp_alias, dst, sym
> @@ -547,18 +563,21 @@ SYM_CODE_START(vectors)
>  
>  	kernel_ventry	1, sync				// Synchronous EL1h
>  	kernel_ventry	1, irq				// IRQ EL1h
> -	kernel_ventry	1, fiq_invalid			// FIQ EL1h
> +							// FIQ EL1h
> +	kernel_ventry	1, fiq_invalid, 64, irq, ARM64_NEEDS_FIQ

It could be better to create a set of first-class FIQ handlers rather
than this alternative-target macro. I quickly hacked up the below
instead, which I find more readable: each FIQ vector gets its own
el*_fiq entry point, which either branches to the corresponding
*_fiq_invalid handler or, once ARM64_NEEDS_FIQ patches that branch to
a NOP, falls straight through into the existing IRQ handler.

Thanks,

	M.

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a8c3e7aaca74..dc65b56626ab 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -547,18 +547,18 @@ SYM_CODE_START(vectors)
 
 	kernel_ventry	1, sync				// Synchronous EL1h
 	kernel_ventry	1, irq				// IRQ EL1h
-	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, fiq				// FIQ EL1h
 	kernel_ventry	1, error			// Error EL1h
 
 	kernel_ventry	0, sync				// Synchronous 64-bit EL0
 	kernel_ventry	0, irq				// IRQ 64-bit EL0
-	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, fiq				// FIQ 64-bit EL0
 	kernel_ventry	0, error			// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
 	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
 	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
-	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, fiq_compat, 32		// FIQ 32-bit EL0
 	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
 #else
 	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
@@ -658,6 +658,10 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
 SYM_CODE_END(el1_sync)
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el1_fiq_invalid
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
 	gic_prio_irq_setup pmr=x20, tmp=x1
@@ -688,6 +692,7 @@ alternative_else_nop_endif
 
 	kernel_exit 1
 SYM_CODE_END(el1_irq)
+SYM_CODE_END(el1_fiq)
 
 /*
  * EL0 mode handlers.
@@ -710,10 +715,15 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
 SYM_CODE_END(el0_sync_compat)
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el0_fiq_invalid_compat
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
 	kernel_entry 0, 32
 	b	el0_irq_naked
 SYM_CODE_END(el0_irq_compat)
+SYM_CODE_END(el0_fiq_compat)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
 	kernel_entry 0, 32
@@ -722,6 +732,10 @@ SYM_CODE_END(el0_error_compat)
 #endif
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el0_fiq_invalid
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
 el0_irq_naked:
@@ -736,6 +750,7 @@ el0_irq_naked:
 
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
+SYM_CODE_END(el0_fiq)
 
 SYM_CODE_START_LOCAL(el1_error)
 	kernel_entry 1


-- 
Without deviation from the norm, progress is not possible.


