GNU Linux-libre 4.14.251-gnu1
[releases.git] / arch / x86 / include / asm / switch_to.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_SWITCH_TO_H
3 #define _ASM_X86_SWITCH_TO_H
4
5 #include <linux/sched/task_stack.h>
6
7 struct task_struct; /* one of the stranger aspects of C forward declarations */
8
/*
 * Low-level context-switch entry point; its register save/restore order
 * must match struct inactive_task_frame below.
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/*
 * C portion of the context switch; returns the task switched away from.
 * __visible keeps the symbol available to non-C references.
 */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
14
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
36
/*
 * Entry point for a newly created task (asmlinkage: reached from asm).
 * NOTE(review): presumably installed as inactive_task_frame.ret_addr by
 * the fork path — confirm against copy_thread() in process.c.
 */
asmlinkage void ret_from_fork(void);
38
/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
	unsigned long flags;	/* saved flags register */
#ifdef CONFIG_X86_64
	/* Callee-saved registers, 64-bit. */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	/* Callee-saved registers, 32-bit. */
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
63
/*
 * Switch frame plus a full pt_regs, laid out contiguously.
 * NOTE(review): presumably placed on a new task's stack at fork time so
 * the child can return through ret_from_fork — confirm in process.c.
 */
struct fork_frame {
	struct inactive_task_frame frame;	/* consumed by __switch_to_asm() */
	struct pt_regs regs;
};
68
/*
 * Switch from task @prev to @next.  prepare_switch_to() runs first,
 * still on @prev's stack, to pre-fault @next's stack; @last is then
 * assigned the task_struct returned by __switch_to_asm().
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));		 	\
} while (0)
75
#ifdef CONFIG_X86_32
/*
 * Keep the per-CPU TSS ss1 slot and MSR_IA32_SYSENTER_CS in sync with
 * @thread->sysenter_cs, avoiding the MSR write when the cached TSS
 * value already matches.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (likely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) != thread->sysenter_cs)) {
		this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
#endif
87
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	/*
	 * NOTE(review): under Xen PV the kernel stack top is reloaded
	 * explicitly instead — presumably because there is no entry
	 * trampoline there; confirm against the Xen entry code.
	 */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}
99
100 #endif /* _ASM_X86_SWITCH_TO_H */