// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
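
/*
 * Usage sketch (illustrative only, not part of this file): kernel code
 * that wants to touch FPU/SIMD registers brackets the work as below.
 * Preemption stays disabled for the whole region, so it must not sleep;
 * do_simd_work() and the scalar fallback are assumed names for the example.
 *
 *	static void do_simd_work(void)
 *	{
 *		if (!irq_fpu_usable())
 *			return;			// use a scalar fallback instead
 *
 *		kernel_fpu_begin();		// wraps kernel_fpu_begin_mask()
 *		// ... use SSE/AVX registers here, no scheduling allowed ...
 *		kernel_fpu_end();
 *	}
 */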
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}
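
/*
 * For reference (descriptive note, not from the original source): the
 * low 16 bits above match what FNINIT establishes -- cwd 0x037f masks
 * all x87 exceptions and selects 64-bit precision, swd 0x0000 clears
 * the status flags, and twd 0xffff tags every x87 register as empty.
 */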
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
	struct fpu *dst_fpu = &dst->thread.fpu;
	struct fpu *src_fpu = &src->thread.fpu;

	dst_fpu->last_cpu = -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * If the FPU registers are not current just memcpy() the state.
	 * Otherwise save current FPU registers directly into the child's FPU
	 * context, without any memory-to-memory copying.
	 *
	 * ( The function 'fails' in the FNSAVE case, which destroys
	 *   register contents so we have to load them back. )
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);

	else if (!copy_fpregs_to_fpstate(dst_fpu))
		copy_kernel_to_fpregs(&dst_fpu->state);

	fpregs_unlock();

	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}
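
/*
 * Calling-context sketch (an assumption for illustration, simplified
 * from the x86 fork path; only fpu__copy() is taken from this file):
 *
 *	int arch_dup_task_struct(struct task_struct *dst,
 *				 struct task_struct *src)
 *	{
 *		memcpy(dst, src, arch_task_struct_size);
 *		...
 *		return fpu__copy(dst, src);
 *	}
 */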
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
static void fpu__initialize(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpstate_init(&fpu->state);
	trace_x86_fpu_init_state(fpu);
}
/*
 * This function must be called before we read a task's fpstate.
 *
 * There's two cases where this gets called:
 *
 * - for the current task (when coredumping), in which case we have
 *   to save the latest FPU registers into the fpstate,
 *
 * - or it's called for stopped tasks (ptrace), in which case the
 *   registers were already saved by the context-switch code when
 *   the task scheduled out.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__prepare_read(struct fpu *fpu)
{
	if (fpu == &current->thread.fpu)
		fpu__save(fpu);
}
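
/*
 * Caller sketch (an assumption, modeled on a ptrace regset ->get()
 * handler; every name except fpu__prepare_read() is illustrative):
 *
 *	int example_fpregs_get(struct task_struct *target, void *buf)
 *	{
 *		struct fpu *fpu = &target->thread.fpu;
 *
 *		fpu__prepare_read(fpu);		// flush live regs to fpstate
 *		memcpy(buf, &fpu->state.fxsave, sizeof(struct fxregs_state));
 *		return 0;
 *	}
 */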
/*
 * This function must be called before we write a task's fpstate.
 *
 * Invalidate any cached FPU registers.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its cached status here then the cached in-registers
 * state pending on its former CPU could be restored, corrupting
 * the new state.
 */
void fpu__prepare_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	/* Invalidate any cached state: */
	__fpu_invalidate_fpregs_state(fpu);
}
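
/*
 * Mirror-image sketch for writers (an assumption, modeled on a regset
 * ->set() handler; every name except fpu__prepare_write() is illustrative):
 *
 *	int example_fpregs_set(struct task_struct *target, const void *buf)
 *	{
 *		struct fpu *fpu = &target->thread.fpu;
 *
 *		fpu__prepare_write(fpu);	// drop any cached fpregs
 *		memcpy(&fpu->state.fxsave, buf, sizeof(struct fxregs_state));
 *		return 0;
 *	}
 */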
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}
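
/*
 * Caller sketch (an assumption, modeled on the task-exit path; the
 * function name is illustrative): the state is dropped once the task
 * can no longer return to user mode, and fpu__clear() below relies on
 * the same helper before reinitializing.
 *
 *	void example_exit_thread(struct task_struct *tsk)
 *	{
 *		...
 *		fpu__drop(&tsk->thread.fpu);
 *	}
 */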
/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	fpregs_lock();

	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();

	fpregs_mark_activate();
	fpregs_unlock();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
	fpu__initialize(fpu);
	if (static_cpu_has(X86_FEATURE_FPU))
		copy_init_fpstate_to_fpregs();
}
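
/*
 * Caller sketch (an assumption, modeled on the execve path; the
 * function name is illustrative):
 *
 *	void example_flush_thread(void)
 *	{
 *		...
 *		fpu__clear(&current->thread.fpu);  // fresh state for the new image
 *	}
 */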
/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	__fpregs_load_activate();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
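
/*
 * Usage sketch (an assumption, modeled on the exit-to-usermode path;
 * the function name is illustrative): once TIF_NEED_FPU_LOAD is set,
 * the registers have to be reloaded before returning to user space:
 *
 *	static void example_exit_to_user_mode_prepare(void)
 *	{
 *		if (test_thread_flag(TIF_NEED_FPU_LOAD))
 *			switch_fpu_return();
 *	}
 */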
#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif
void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);
/*
 * x87 math exception handling:
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return SIGFPE;
	} else if (err & 0x004) { /* Divide by Zero */
		return SIGFPE;
	} else if (err & 0x008) { /* Overflow */
		return SIGFPE;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return 0;
	} else if (err & 0x020) { /* Precision */
		return SIGFPE;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
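
/*
 * Worked example (illustrative, not from the original source): a task
 * unmasks x87 divide-by-zero by clearing ZM (bit 2) in the control
 * word and then divides by zero. The status word latches ZE (bit 2),
 * so with cwd = 0x037b and swd = 0x0004:
 *
 *	err = swd & ~cwd = 0x0004 & ~0x037b = 0x0004
 *
 * which hits the "Divide by Zero" test above and returns SIGFPE.
 */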