/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
}
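
/*
 * Illustrative note (example values, not from this file): with
 * xfeatures_mask == 0x7 (x87 | SSE | AVX), the assignment above yields
 *
 *	xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | 0x7
 *
 * i.e. bit 63 (the compacted-format flag) plus the enabled feature bits;
 * XRSTORS raises #GP if the compacted-format bit is clear.
 */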

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	might_fault();							\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: negl %%eax\n"					\
		     "   jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32)) {
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
			kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a
		 * second REX prefix will be generated (to the assembler,
		 * rex64 followed by semicolon is a separate instruction),
		 * and hence the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix
		 * submitted to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler
		 * to select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}
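
/*
 * Worked example of the breakage described above (illustrative only): for a
 * buffer addressed through %r8, "rex64; fxsave (%r8)" assembles to a lone
 * 0x48 (REX.W) byte followed by 0x41 0x0f 0xae 0x00 (REX.B + FXSAVE).  Only
 * the REX prefix immediately preceding the opcode takes effect, so REX.W is
 * dropped and a 32-bit FXSAVE image is written.
 */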

static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
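
/*
 * Illustrative decoding (not part of the original source): each sequence is
 * the two-byte opcode plus a fixed ModRM byte selecting the /reg form and
 * the (%edi)/(%rdi) operand.  E.g. XSAVE is 0F AE /4, and 0x27 encodes
 * mod=00, reg=100 (/4), rm=111 ((%rdi)); REX_PREFIX supplies the REX.W byte
 * on 64-bit builds, selecting the 64-bit (XSAVE64-style) forms.
 */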

/*
 * After this @err contains 0 on success or the negated trap number when
 * the operation raises an exception. For faults this results in -EFAULT.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: negl %%eax\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format for the user-space xsave area, for
 * backward compatibility with old applications that do not understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
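
/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * signal-restore style path would typically map any non-zero return value
 * to -EFAULT:
 *
 *	if (copy_user_to_xregs(buf, xfeatures_mask))
 *		return -EFAULT;
 */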

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}
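
/*
 * Illustrative use (a sketch mirroring switch_fpu_prepare() below): the
 * return value tells the caller whether the registers are still live:
 *
 *	if (!copy_fpregs_to_fpstate(fpu))
 *		fpu->last_cpu = -1;	// FNSAVE destroyed the registers
 *	else
 *		fpu->last_cpu = cpu;	// registers still match fpu->state
 */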

static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * "m" is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if fpu->last_cpu matches the CPU, and the per-CPU
 * fpu_fpregs_owner_ctx points to that same fpu context.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: the CPU if you are using it for something else
 * (with preemption disabled), the FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
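
/*
 * Illustrative sketch (hypothetical callers): code that is about to clobber
 * this CPU's FPU registers, with preemption disabled, invalidates the CPU
 * side:
 *
 *	preempt_disable();
 *	__cpu_invalidate_fpregs_state();
 *	// ... use the FPU registers ...
 *	preempt_enable();
 *
 * while code that rewrites the in-memory state of a task it has stopped
 * invalidates the task side via __fpu_invalidate_fpregs_state(&t->thread.fpu).
 */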

/*
 * These generally need preemption protection to work;
 * try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	} else
		old_fpu->last_cpu = -1;
}

/*
 * Misc helper functions:
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
		       new_fpu->initialized;

	if (preload) {
		if (!fpregs_state_valid(new_fpu, cpu))
			copy_kernel_to_fpregs(&new_fpu->state);
		fpregs_activate(new_fpu);
	}
}
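
/*
 * Illustrative sketch of the two-stage switch (the real caller is the arch
 * context-switch path; the variable names here are only for illustration):
 *
 *	switch_fpu_prepare(&prev_p->thread.fpu, cpu);
 *	// ... switch stacks, segments and the rest of the thread state ...
 *	switch_fpu_finish(&next_p->thread.fpu, cpu);
 */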

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (fpu->initialized)
		fpregs_activate(fpu);
	preempt_enable();
}

/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}

#endif /* _ASM_X86_FPU_INTERNAL_H */