// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW
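
/* Toggled via the kernel.panic_on_stackoverflow sysctl; 0 = warn only. */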
int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	/* Mask ESP down to its offset within the THREAD_SIZE-aligned stack */
	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
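
/*
 * Run func() on the given stack: save ESP in EBX, switch ESP to the new
 * stack, make the indirect call through the retpoline-safe CALL_NOSPEC
 * thunk, then restore the original ESP from EBX.
 */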
static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl %%ebx,%%esp \n"
		     CALL_NOSPEC
		     "movl %%ebx,%%esp \n"
		     : "=b" (stack)
		     : "0" (stack),
		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
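
/*
 * Switch to the per-CPU hardirq stack and run desc->handle_irq(desc) there.
 * Returns 0 if we are already on the hardirq stack, in which case the
 * caller must handle the interrupt on the current stack instead.
 */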
static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack);

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all).
	 */
	if (unlikely(curstk == irqstk))
		return 0;

	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/*
	 * Save the current stack pointer at the bottom of the IRQ stack,
	 * so the unwinder can find its way back to the previous stack.
	 */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);
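
	/*
	 * Hand-rolled call: desc lands in EAX (the first argument under
	 * the 32-bit regparm(3) convention), ESP is switched to the
	 * hardirq stack, desc->handle_irq is called through the
	 * CALL_NOSPEC thunk, then ESP is switched back.
	 */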
	asm volatile("xchgl %%ebx,%%esp \n"
		     CALL_NOSPEC
		     "movl %%ebx,%%esp \n"
		     : "=a" (arg1), "=b" (isp)
		     :  "0" (desc),   "1" (isp),
			[thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");

	return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	struct irq_stack *irqstk;

	if (per_cpu(hardirq_stack, cpu))
		return;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(hardirq_stack, cpu) = irqstk;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(softirq_stack, cpu) = irqstk;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
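
/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather than
 * on whatever (possibly nearly full) stack we were called on.
 */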
void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
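
/*
 * Entry point from the IRQ vector code: validate the descriptor, then run
 * the handler on the hardirq stack unless we came from user mode or are
 * already on it, in which case the current stack is used directly.
 */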
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	if (IS_ERR_OR_NULL(desc))
		return false;

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
		if (unlikely(overflow))
			print_stack_overflow();
		generic_handle_irq_desc(desc);
	}

	return true;
}