[PATCH v2 10/14] arm64/nmi: Manage masking for superpriority interrupts along with DAIF

Mark Brown broonie at kernel.org
Sat Nov 12 07:17:04 PST 2022


As we do for pseudo NMIs add code to our DAIF management which keeps
superpriority interrupts unmasked when we have asynchronous exceptions
enabled. Since superpriority interrupts are not masked through DAIF like
pseudo NMIs are we also need to modify the assembler macros for managing
DAIF to ensure that the masking is done in the assembly code. At present
users of the assembly macros always mask pseudo NMIs.

There is a difference in the actual handling between pseudo NMIs
and superpriority interrupts in the assembly save_and_disable_irq and
restore_irq macros: these cover both interrupts and FIQs using DAIF
without regard for the use of pseudo NMIs, so they also mask those, but
they are not updated here to mask superpriority interrupts. Given the names it is not
clear that the behaviour with pseudo NMIs is particularly intentional,
and in any case these macros are only used in the implementation of
alternatives for software PAN while hardware PAN has been mandatory
since v8.1 so it is not anticipated that practical systems with support
for FEAT_NMI will ever execute the affected code.

This should be a conservative set of masked regions, we may be able to
relax this in future, but this should represent a good starting point.

Signed-off-by: Mark Brown <broonie at kernel.org>
---
 arch/arm64/include/asm/assembler.h | 11 +++++++++++
 arch/arm64/include/asm/daifflags.h | 18 ++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 88d9779a83c0..e85a7e9af9ae 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -52,19 +52,30 @@ alternative_else_nop_endif
 
 	.macro save_and_disable_daif, flags
 	mrs	\flags, daif
+        disable_allint
 	msr	daifset, #0xf
 	.endm
 
 	.macro disable_daif
+        disable_allint
 	msr	daifset, #0xf
 	.endm
 
 	.macro enable_daif
 	msr	daifclr, #0xf
+	enable_allint
 	.endm
 
 	.macro	restore_daif, flags:req
 	msr	daif, \flags
+#ifdef CONFIG_ARM64_NMI
+alternative_if ARM64_HAS_NMI
+	/* If async exceptions are unmasked we can take NMIs */
+	tbnz	\flags, #8, 2004f
+	msr_s	SYS_ALLINT_CLR, xzr
+2004:
+alternative_else_nop_endif
+#endif
 	.endm
 
 	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index b3bed2004342..fda73976068f 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -10,6 +10,7 @@
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/nmi.h>
 #include <asm/ptrace.h>
 
 #define DAIF_PROCCTX		0
@@ -35,6 +36,9 @@ static inline void local_daif_mask(void)
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
+	if (system_uses_nmi())
+		_allint_set();
+
 	trace_hardirqs_off();
 }
 
@@ -50,6 +54,12 @@ static inline unsigned long local_daif_save_flags(void)
 			flags |= PSR_I_BIT | PSR_F_BIT;
 	}
 
+	if (system_uses_nmi()) {
+		/* If IRQs are masked with ALLINT, reflect it in the flags */
+		if (read_sysreg_s(SYS_ALLINT) & ALLINT_ALLINT)
+			flags |= PSR_I_BIT | PSR_F_BIT;
+	}
+
 	return flags;
 }
 
@@ -114,6 +124,10 @@ static inline void local_daif_restore(unsigned long flags)
 		gic_write_pmr(pmr);
 	}
 
+	/* If we can take asynchronous errors we can take NMIs */
+	if (system_uses_nmi() && !(flags & PSR_A_BIT))
+		_allint_clear();
+
 	write_sysreg(flags, daif);
 
 	if (irq_disabled)
@@ -131,6 +145,10 @@ static inline void local_daif_inherit(struct pt_regs *regs)
 	if (interrupts_enabled(regs))
 		trace_hardirqs_on();
 
+	/* If we can take asynchronous errors we can take NMIs */
+	if (system_uses_nmi() && !(flags & PSR_A_BIT))
+		_allint_clear();
+
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(regs->pmr_save);
 
-- 
2.30.2




More information about the linux-arm-kernel mailing list