/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(&d0, 0);
	get_debugreg(&d1, 1);
	get_debugreg(&d2, 2);
	get_debugreg(&d3, 3);
	get_debugreg(&d6, 6);
	get_debugreg(&d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}
enum which_selector {
	FS,
	GS
};
/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches, so there is nothing to save here.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
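
/*
 * Illustration (added; not in the original source): how the cases handled
 * above map to state for a 64-bit task, assuming its base was set via
 * arch_prctl(ARCH_SET_FS, base) or the GDT TLS slots:
 *
 *	selector == 0          -> common case; trust the previously saved base
 *	selector in {1, 2, 3}  -> base is zero (or junk on X86_BUG_NULL_SEG
 *	                          CPUs); zero the saved copy
 *	selector > 3           -> real GDT/LDT segment; the base lives in the
 *	                          descriptor, so nothing needs saving
 */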
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
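
/*
 * Added note: the test (prev_index | next_index | prev_base) above is zero
 * only when all three values are zero, i.e. when both the outgoing and the
 * incoming state are fully zeroed; that is the single case in which the
 * expensive segment load can be skipped. For example:
 *
 *	prev_index = 0, prev_base = 0, next_index = 0  ->  skip the load
 *	prev_index = 3, prev_base = 0, next_index = 0  ->  must load
 */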
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	/*
	 * For a new task use the RESET flags value since there is no before.
	 * All the status flags are zero; DF and all the system flags must also
	 * be 0, specifically IF must be 0 because we context switch to the new
	 * task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	err = 0;
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
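
/*
 * For illustration (added; not part of the original file): the CLONE_SETTLS
 * path above is what services a userspace clone() such as
 *
 *	clone(fn, stack, CLONE_VM | CLONE_THREAD | CLONE_SETTLS | ..., arg,
 *	      ..., tls);
 *
 * where, for a 64-bit caller, 'tls' is the new thread's FS base and is
 * installed through do_arch_prctl(p, ARCH_SET_FS, tls) above.
 */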
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);
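
/*
 * For context (added; hedged): start_thread() is invoked by binary-format
 * loaders at the end of exec. The ELF loader, for instance, finishes with
 * roughly
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * so the first return to user mode lands at the new program's entry point
 * on the freshly built stack.
 */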
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	fpu_switch_t fpu_switch;

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, fpu_switch);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1. This changes current_thread_info(). */
	load_sp0(tss, next);

	switch_to_extra(prev_p, next_p);

	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	return prev_p;
}
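
/*
 * Added illustration (hedged) of the guarantee the SYSRET workaround above
 * relies on:
 *
 *	interrupt entry from CPL 3     // may eventually leave SS == NULL
 *	  ... context switch ...       // this code reloads SS = __KERNEL_DS
 *	    syscall task runs SYSRET   // SS is guaranteed valid here
 *
 * Because an interrupt cannot occur in the same task as a running syscall,
 * at least one context switch, and therefore one SS fixup, separates any
 * interrupt vector entry from the next SYSRET.
 */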
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/* in_compat_syscall() uses the presence of the x32
		   syscall bit flag to determine compat status */
		current_thread_info()->status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current_thread_info()->status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
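
/*
 * For illustration (added; not part of the original file): userspace reaches
 * the ARCH_SET_ and ARCH_GET_ cases above through the arch_prctl(2) syscall,
 * e.g.:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);	// read this thread's FS base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, base);	// set GS base (< TASK_SIZE_MAX)
 */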
long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}