/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
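
/*
 * Per-CPU scratch slot for the 64-bit syscall entry code (see entry_64.S),
 * which stashes the user RSP here while switching to the kernel stack.
 */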
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	if (dead_task->mm && dead_task->mm->context.ldt) {
		pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
			dead_task->comm,
			dead_task->mm->context.ldt->entries,
			dead_task->mm->context.ldt->nr_entries);
		BUG();
	}
#endif
}

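/*
 * On x86_64, FS and GS each have a visible 16-bit selector and a hidden
 * base.  The base can be a full 64-bit address, which no GDT/LDT descriptor
 * can express, so it is accessed through MSR_FS_BASE and the GS base MSRs
 * instead.  The helpers below keep both halves consistent across context
 * switches.
 */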
enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}

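/*
 * Snapshot the outgoing task's FS/GS selectors, and (via the legacy path
 * above) its bases, into its thread_struct.
 */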
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and current->thread.gsbase
 * may not match the corresponding CPU registers (see save_base_legacy()).  KVM
 * wants an efficient way to save and restore FSBASE and GSBASE.
 * When FSGSBASE extensions are enabled, this will have to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

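/*
 * Note: the FS selector can be written with a plain "mov to %fs", but GS
 * must go through load_gs_index(), which swaps in the user GS base around
 * the selector write so the kernel's per-CPU GS base isn't clobbered.
 */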
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	/*
	 * For a new task use the RESET flags value since there is no before.
	 * All the status flags are zero; DF and all the system flags must also
	 * be 0, specifically IF must be 0 because we context switch to the new
	 * task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

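/*
 * Reset the register state so that the first return to user mode after
 * exec lands at new_ip with stack new_sp, using the requested code, stack
 * and data segments.
 */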
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup.  Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad.  The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_compat_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to make
	 * in_compat_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret = map_vdso_once(image, addr);

	if (ret)
		return ret;
	return (long)image->size;
}
#endif

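/*
 * 64-bit arch_prctl() options: ARCH_SET_{FS,GS} install a new segment base
 * (clearing the selector so __switch_to() takes the MSR path), while
 * ARCH_GET_{FS,GS} read the base back, live from the MSR if @task is
 * current, else from the saved thread_struct.
 */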
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (option) {
	case ARCH_SET_GS:
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = arg2;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = arg2;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

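/*
 * Syscall entry point: try the 64-bit options first; anything they don't
 * recognize (-EINVAL) falls through to the common options (e.g.
 * ARCH_GET_CPUID/ARCH_SET_CPUID) handled by do_arch_prctl_common().
 */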
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

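/*
 * Report the task's user stack pointer: on x86_64, the value saved in
 * pt_regs at kernel entry is authoritative (used e.g. for /proc/<pid>/stat).
 */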
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}