[PATCH v1 09/18] arm64/entry: Manage ALLINT.ALLINT when FEAT_NMI is active

Mark Brown broonie at kernel.org
Fri Nov 4 16:54:44 PDT 2022


As might not be expected given the name, the superpriority interrupts
provided by FEAT_NMI can in fact be masked. This masking must be managed
by EL1 code so that the time spent with superpriority interrupts masked
is minimised without introducing reentrancy issues.

We configure FEAT_NMI with SCTLR_EL1.SPINTMASK clear, meaning that rather
than having PSTATE.SP act as an interrupt mask the hardware sets
PSTATE.ALLINT on taking an exception to EL1. This means that on entry to
EL1 ALLINT.ALLINT will be set and it is the responsibility of the EL1 code
to clear it so that neither normal nor superpriority interrupts are left
masked by it. Add appropriate code in each EL1 entry path, with no special
handling for superpriority interrupts introduced yet.
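
For illustration only, a rough sketch of what clearing this mask might
look like on a CPU with FEAT_NMI; the system_uses_nmi() capability check
and the use of the ALLINT pstate-field MSR below are assumptions for the
sketch rather than the actual helper, which this series provides as
nmi_unmask() in <asm/nmi.h>:

	/* Sketch only: assumes an assembler that knows the ALLINT pstate field */
	static __always_inline void nmi_unmask_sketch(void)
	{
		/* Hypothetical capability check: do nothing if FEAT_NMI is not in use */
		if (!system_uses_nmi())
			return;

		/* Clear PSTATE.ALLINT so superpriority interrupts are no longer masked */
		asm volatile("msr ALLINT, #0" ::: "memory");
	}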

Since unmasking needs to be handled differently when the exception taken
is itself a superpriority interrupt, this code is not factored out into
the shared assembly entry code. Given the number of special cases in the
various entry paths, and in order to ensure that superpriority interrupts
are masked for as little time as possible, explicit handling in each of
the entry points seemed the simplest and most robust approach.

Signed-off-by: Mark Brown <broonie at kernel.org>
---
 arch/arm64/kernel/entry-common.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 9173fad279af..32547723fcc8 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -21,6 +21,7 @@
 #include <asm/irq_regs.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
+#include <asm/nmi.h>
 #include <asm/processor.h>
 #include <asm/sdei.h>
 #include <asm/stacktrace.h>
@@ -294,6 +295,8 @@ static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
 
+	nmi_unmask();
+
 	__show_regs(regs);
 	panic("Unhandled exception");
 }
 
 #define UNHANDLED(el, regsize, vector)							\
@@ -420,6 +423,8 @@ asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
+	nmi_unmask();
+
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_DABT_CUR:
 	case ESR_ELx_EC_IABT_CUR:
@@ -477,6 +482,7 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
 static void noinstr el1_interrupt(struct pt_regs *regs,
 				  void (*handler)(struct pt_regs *))
 {
+	nmi_unmask();
 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
 
 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
@@ -499,6 +505,7 @@ asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
+	nmi_unmask();
 	local_daif_restore(DAIF_ERRCTX);
 	arm64_enter_nmi(regs);
 	do_serror(regs, esr);
@@ -649,6 +656,8 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
+	nmi_unmask();
+
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_SVC64:
 		el0_svc(regs);
@@ -706,6 +715,7 @@ static void noinstr el0_interrupt(struct pt_regs *regs,
 {
 	enter_from_user_mode(regs);
 
+	nmi_unmask();
 	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
 
 	if (regs->pc & BIT(55))
@@ -742,6 +752,7 @@ static void noinstr __el0_error_handler_common(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
+	nmi_unmask();
 	enter_from_user_mode(regs);
 	local_daif_restore(DAIF_ERRCTX);
 	arm64_enter_nmi(regs);
@@ -777,6 +788,8 @@ asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
 
+	nmi_unmask();
+
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_SVC32:
 		el0_svc_compat(regs);
-- 
2.30.2