// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>
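
/*
 * Conduit (HVC or SMC) used to tell firmware we have finished handling an
 * event. Chosen in sdei_arch_get_entry_point() below to match the conduit
 * the SDEI driver probed with.
 */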
unsigned long sdei_exit_mode;

/*
 * With VMAP'd stacks, the kernel checks for stack overflow on exception
 * using sp as a scratch register, meaning SDEI has to switch to its own
 * stack. We need two stacks as a critical event may interrupt a normal
 * event that has just taken a synchronous exception, and is using sp as
 * scratch register. For a critical event interrupting a normal event, we
 * can't reliably tell if we were on the sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif
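
/* The registered event we are currently handling on this CPU, per priority. */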
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}
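
/*
 * The unwinder uses these helpers to tell whether sp lies on one of this
 * CPU's SDEI stacks.
 */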
static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
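
/*
 * Without VMAP stacks there are no separate SDEI stacks: events are handled
 * on whichever stack was in use when the event fired.
 */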
bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
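	/*
	 * With KPTI an event can be taken while the kernel text is unmapped,
	 * so give firmware the trampoline's fixed alias as the entry point:
	 * the trampoline text is always mapped.
	 */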
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
	return (unsigned long)__sdei_asm_handler;
}

/*
 * __sdei_handler() returns one of:
 * SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED  - failure, return this error code to firmware.
 * virtual-address - success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);
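
	/*
	 * Firmware passed the event's arguments in x0..x3, clobbering the
	 * interrupted context's values; the KPTI entry trampoline also
	 * clobbers x4.
	 */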
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers' values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN manually. UAO
	 * will be cleared by sdei_event_handler()'s force_uaccess_begin()
	 * call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;
	else
		return vbar + 0x480;
}
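
/*
 * SDEI events may be taken with all interrupts masked, so account the
 * handler as an NMI for RCU and lockdep.
 */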
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	arm64_exit_nmi(regs);

	return ret;
}