/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __ro_after_init;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

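/*
 * Decode and print the PSTATE flags, using the AArch32 PSR layout for
 * compat tasks and the native AArch64 layout otherwise.
 */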
static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
			pstate,
			pstate & PSR_AA32_N_BIT ? 'N' : 'n',
			pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
			pstate & PSR_AA32_C_BIT ? 'C' : 'c',
			pstate & PSR_AA32_V_BIT ? 'V' : 'v',
			pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
			pstate & PSR_AA32_T_BIT ? "T32" : "A32",
			pstate & PSR_AA32_E_BIT ? "BE" : "LE",
			pstate & PSR_AA32_A_BIT ? 'A' : 'a',
			pstate & PSR_AA32_I_BIT ? 'I' : 'i',
			pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n",
			pstate,
			pstate & PSR_N_BIT ? 'N' : 'n',
			pstate & PSR_Z_BIT ? 'Z' : 'z',
			pstate & PSR_C_BIT ? 'C' : 'c',
			pstate & PSR_V_BIT ? 'V' : 'v',
			pstate & PSR_D_BIT ? 'D' : 'd',
			pstate & PSR_A_BIT ? 'A' : 'a',
			pstate & PSR_I_BIT ? 'I' : 'i',
			pstate & PSR_F_BIT ? 'F' : 'f',
			pstate & PSR_PAN_BIT ? '+' : '-',
			pstate & PSR_UAO_BIT ? '+' : '-');
	}
}

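/*
 * Print pc, lr, sp and the general-purpose registers from a saved
 * pt_regs. Kernel addresses are printed symbolically, user addresses
 * as raw hex values.
 */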
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)lr);
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}

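/*
 * Clear the TLS registers when a thread flushes its state (e.g. on
 * exec). Compat tasks additionally carry TLS in tpidrro_el0 and a
 * shadow copy in thread.uw.tp_value.
 */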
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

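/*
 * Reset the architecture-specific thread state on exec: FPSIMD/SVE
 * registers, TLS and hardware breakpoints.
 */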
void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

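/* Release architecture-specific state, currently the SVE register buffer. */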
void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

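/*
 * Called at fork/clone time: flush the live FPSIMD state back into
 * current's task_struct before it is copied into the child, so that
 * the child starts from a consistent snapshot.
 */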
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely.  dst's sve_state
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF_SVE and sve_state in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

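/*
 * Set up the initial register state for a new thread. User threads
 * inherit a copy of the parent's pt_regs with x0 zeroed as the child's
 * return value; kernel threads get a clean EL1 frame, with the thread
 * function and its argument held in x19/x20 for ret_from_fork.
 */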
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;

		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
			set_ssbs_bit(childregs);

		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

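/* Save the live tpidr_el0 value back into current's thread structure. */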
void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

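/*
 * Switch the TLS registers: preserve the outgoing task's TLS, then
 * install next's. tpidrro_el0 carries the compat TLS pointer; for
 * native tasks it is zeroed unless the KPTI trampoline will clobber
 * it anyway.
 */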
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);

	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpu_have_feature(cpu_feature(SSBS)))
		return;

	/* If the mitigation is enabled, then we leave SSBS clear. */
	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;

	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

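/*
 * Walk a blocked task's stack to find the first caller outside the
 * scheduler, i.e. the point the task is waiting at. Gives up after 16
 * frames and returns 0 if nothing is found.
 */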
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count ++ < 16);

out:
	put_task_stack(p);
	return ret;
}

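/*
 * Randomize the stack pointer within a page at exec time (unless
 * randomization is disabled), then round down to 16-byte alignment.
 */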
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

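/*
 * Randomize the heap start: within 32MB above mm->brk for compat
 * tasks, within 1GB for native 64-bit tasks.
 */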
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, SZ_32M);
	else
		return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
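/*
 * Called by STACKLEAK-instrumented code before a variable-length stack
 * allocation: panic if the allocation would overrun the current stack.
 */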
void __used stackleak_check_alloca(unsigned long size)
{
	unsigned long stack_left;
	unsigned long current_sp = current_stack_pointer;
	struct stack_info info;

	BUG_ON(!on_accessible_stack(current, current_sp, &info));

	stack_left = current_sp - info.low;

	/*
	 * There's a good chance we're almost out of stack space if this
	 * is true. Using panic() over BUG() is more likely to give
	 * reliable debugging output.
	 */
	if (size >= stack_left)
		panic("alloca() over the kernel stack boundary\n");
}
EXPORT_SYMBOL(stackleak_check_alloca);
#endif