[PATCH 08/10] ARM: implement IRQ stacks
From: Ard Biesheuvel <ardb at kernel.org>
Date: Tue Sep 21 02:54:06 PDT 2021
Now that we no longer rely on the stack pointer to access the current
task struct or thread info, we can implement support for IRQ stacks
cleanly as well.

Define a per-CPU IRQ stack and switch to this stack when taking an IRQ,
provided that we were not already using that stack in the interrupted
context. This is never the case for IRQs taken from user space, but an
IRQ taken while running in the kernel may interrupt the handling of an
earlier IRQ that was taken from user space and has not completed yet, in
which case the IRQ stack is already in use.
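
For reference, the SP selection that the irq_handler macro performs in
assembly below is roughly the following in C terms. This is an
illustrative sketch only, not code added by the patch; the function name
is made up, and old_sp mirrors the SP that the macro preserves in r7:

  /* Illustrative sketch only, not part of this patch. */
  static unsigned long irq_handler_sp(unsigned long old_sp, bool from_user)
  {
          unsigned long irq_sp = (unsigned long)raw_cpu_read(irq_stack_ptr);

          /* Only kernel-mode IRQs can already be running on the IRQ stack. */
          if (!from_user && irq_sp - old_sp <= THREAD_SIZE)
                  return old_sp;  /* keep using the interrupted SP */

          return irq_sp;          /* switch to the top of this CPU's IRQ stack */
  }
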
Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
---
 arch/arm/Kconfig               |  4 ++
 arch/arm/kernel/entry-armv.S   | 40 ++++++++++++++++++--
 arch/arm/kernel/irq.c          | 23 +++++++++++
 arch/arm/kernel/traps.c        | 15 +++++++-
 arch/arm/lib/backtrace-clang.S |  8 ++++
 arch/arm/lib/backtrace.S       |  8 ++++
 6 files changed, 94 insertions(+), 4 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4f61c9789e7f..a8c1db0736f3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1163,6 +1163,10 @@ config CURRENT_POINTER_IN_TPIDRURO
def_bool y
depends on SMP && CPU_32v6K && !CPU_V6
+config IRQSTACKS
+ def_bool y
+ depends on GENERIC_IRQ_MULTI_HANDLER && THREAD_INFO_IN_TASK
+
config ARM_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP && CPU_V7
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 21896f702447..c1b1b125f647 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -36,11 +36,45 @@
/*
* Interrupt handling.
*/
- .macro irq_handler
+ .macro irq_handler, from_user:req
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
mov_l r1, handle_arch_irq
mov r0, sp
+#ifdef CONFIG_IRQSTACKS
+ mov_l r2, irq_stack_ptr @ Take base address
+ mrc p15, 0, r3, c13, c0, 4 @ Get CPU offset
+ mov r7, sp @ Preserve original SP
+ ldr sp, [r2, r3] @ Load SP from per-CPU var
+ .if \from_user == 0
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r3, sp, r7 @ SP above bottom of IRQ stack?
+ rsbscs r3, r3, #THREAD_SIZE @ ... and below the top?
+ movcs sp, r7 @ If so, revert to incoming SP
+
+ @
+ @ Inform the unwinder where the next frame lives
+ @
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ movcc ip, r7 @ Store the old SP in the frame record.
+ movcc lr, pc @ Make LR point into .entry.text so
+ @ that we will get a dump of the
+ @ exception stack for this frame.
+ stmdbcc sp!, {fp, ip, lr, pc}
+ addcc fp, sp, #12
+#elif defined(CONFIG_UNWINDER_ARM)
+ARM( mov fp, r7 )
+ARM( .setfp fp, sp )
+THUMB( .setfp r7, sp )
+#endif
+ .endif
+#endif
bl_m [r1]
+#ifdef CONFIG_IRQSTACKS
+ mov sp, r7 @ Restore original SP
+#endif
#else
arch_irq_handler_default
#endif
@@ -200,7 +234,7 @@ ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
- irq_handler
+ irq_handler from_user=0
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -427,7 +461,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
- irq_handler
+ irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
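
With the GCC frame pointer unwinder (CONFIG_UNWINDER_FRAME_POINTER plus
CONFIG_CC_IS_GCC), the stmdbcc/addcc sequence above pushes a synthetic
frame record onto the IRQ stack before the handler is called, so that a
frame pointer walk can hop back to the interrupted stack. Counting down
from the slot the new fp points at, the record holds pc, lr (made to
point into .entry.text so the exception stack gets dumped), ip (the
interrupted SP) and the interrupted fp. A hypothetical helper, not part
of the patch, that recovers the interrupted SP from such a record:

  /* Hypothetical helper, not part of this patch: @fp is the frame pointer
   * set up by "stmdbcc sp!, {fp, ip, lr, pc}; addcc fp, sp, #12" above.
   *   fp[0]  = pc
   *   fp[-1] = lr (points into .entry.text)
   *   fp[-2] = ip = SP of the interrupted context
   *   fp[-3] = fp of the interrupted context
   */
  static unsigned long interrupted_sp_from_frame(const unsigned long *fp)
  {
          return fp[-2];
  }

This is the record that the dump_backtrace_entry() change further below
relies on when it notices a frame extending past the top of the stack it
is currently walking.
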
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 20ab1e607522..58af2adb1583 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -43,6 +43,25 @@
unsigned long irq_err_count;
+#ifdef CONFIG_IRQSTACKS
+
+asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);
+
+static void __init init_irq_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stack = (u8 *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack))
+ break;
+ per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
+ }
+}
+
+#endif
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
@@ -99,6 +118,10 @@ void __init init_IRQ(void)
{
int ret;
+#ifdef CONFIG_IRQSTACKS
+ init_irq_stacks();
+#endif
+
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
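
Each IRQ stack is a THREAD_SIZE allocation from the page allocator, so it
is naturally THREAD_SIZE aligned just like a task stack, and irq_stack_ptr
records its top, which is the initial SP on ARM's full descending stack.
In C, a range test equivalent to the subs/rsbscs check in entry-armv.S
could look like this (hypothetical helper, not added by the patch):

  /* Hypothetical helper, not part of this patch: does @sp lie on the
   * current CPU's IRQ stack?  Relies on unsigned wrap-around, like the
   * assembly above. */
  static inline bool on_irq_stack(unsigned long sp)
  {
          unsigned long top = (unsigned long)raw_cpu_read(irq_stack_ptr);

          return top - sp <= THREAD_SIZE;
  }
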
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 89be21ec3b52..b42c446cec9a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -66,6 +66,19 @@ void dump_backtrace_entry(unsigned long where, unsigned long from,
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+ IS_ENABLED(CONFIG_CC_IS_GCC) &&
+ end > ALIGN(frame, THREAD_SIZE)) {
+ /*
+ * If we are walking past the end of the stack, it may be due
+ * to the fact that we are on an IRQ or overflow stack. In this
+ * case, we can load the address of the other stack from the
+ * frame record.
+ */
+ frame = ((unsigned long *)frame)[-2] - 4;
+ end = frame + 4 + sizeof(struct pt_regs);
+ }
+
#ifdef CONFIG_KALLSYMS
printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
loglvl, where, (void *)where, from, (void *)from);
@@ -278,7 +291,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ ALIGN(regs->ARM_sp, THREAD_SIZE));
dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
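
The __die() change works because task stacks and IRQ stacks are both
THREAD_SIZE sized and THREAD_SIZE aligned, so rounding the exception SP up
to the next THREAD_SIZE boundary yields the top of whichever stack the
registers were saved on. A worked example, assuming the usual THREAD_SIZE
of 8 KiB:

  /* Worked example, assuming THREAD_SIZE == 8192. */
  unsigned long sp  = 0xc1234abc;               /* regs->ARM_sp */
  unsigned long top = ALIGN(sp, THREAD_SIZE);   /* 0xc1236000, top of that stack */
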
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 5b2cdb1003e3..9fefb391bb39 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -197,6 +197,14 @@ finished_setup:
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
+#endif
bhi for_each_frame
1006: adr r0, .Lbad
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index e8408f22d4dc..293a2716bd20 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -98,6 +98,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
cmp sv_fp, frame @ next frame must be
mov frame, sv_fp @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+ @
+ @ Kernel stacks may be discontiguous in memory. If the next
+ @ frame is below the previous frame, accept it as long as it
+ @ lives in kernel memory.
+ @
+ cmpls sv_fp, #PAGE_OFFSET
+#endif
bhi for_each_frame
1006: adr r0, .Lbad
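
The two backtrace hunks are identical: the frame pointer walkers normally
require each next frame to live at a strictly higher address than the
current one, which no longer holds when a trace crosses from the IRQ stack
back to the interrupted task stack. With CONFIG_IRQSTACKS, a lower address
is accepted as long as it is still a kernel address. In C terms, the
loop-continue condition becomes roughly the following (illustrative only;
sv_fp and frame are named as in backtrace.S):

  /* Illustrative only, not code from the patch: keep walking if the next
   * frame (sv_fp) is above the current one, or, with IRQSTACKS, if it at
   * least lies in kernel memory. */
  static bool keep_walking(unsigned long sv_fp, unsigned long frame)
  {
          return sv_fp > frame ||
                 (IS_ENABLED(CONFIG_IRQSTACKS) && sv_fp > PAGE_OFFSET);
  }
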
--
2.30.2