/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>
/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x)	WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x)	({ (void)(x); 0; })
#endif
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);
/* Returns 0 or the negated trap number, which results in -EFAULT for #PF */
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1: " #insn "\n"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: negl %%eax\n"					\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (IS_ENABLED(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}
static inline void fxsave(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
}
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
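/*
 * Editor's note (not in the original header): the last byte of each opcode
 * string above is the ModRM byte, which encodes the reg-field opcode
 * extension together with the (%edi)/(%rdi) memory operand. For example,
 * 0x27 is mod=00, reg=/4 (XSAVE), rm=111 (%rdi); 0x37 is /6 (XSAVEOPT);
 * 0x2f is /5 (XRSTOR / XSAVES); 0x1f is /3 (XRSTORS).
 */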
/*
 * After this @err contains 0 on success or the negated trap number when
 * the operation raises an exception. For faults this results in -EFAULT.
 */
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: negl %%eax\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE_FAULT(1b, 3b)				\
		     : [err] "=a" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compacted XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand the
 * compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}
/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving: FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}
static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The "m" operand is a random variable that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, its state will still be saved
 * when it is next switched out.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}
/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}
/*
 * These generally need preemption protection to work,
 * so try to avoid using them on their own:
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
}
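/*
 * Minimal usage sketch (illustrative, not a real call site): the
 * activate/deactivate pair is only meaningful while preemption is off,
 * otherwise fpu_fpregs_owner_ctx could change underneath us:
 *
 *	preempt_disable();
 *	if (!fpregs_active())
 *		fpregs_activate(fpu);
 *	... use the FPU registers ...
 *	preempt_enable();
 *
 * This is essentially the pattern user_fpu_begin() below follows.
 */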
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;
static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the new task has used the FPU (fpstate_active), pre-load
	 * its FPU state.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active;

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;
		trace_x86_fpu_regs_deactivated(old_fpu);

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			__fpregs_activate(new_fpu);
			trace_x86_fpu_regs_activated(new_fpu);
			prefetch(&new_fpu->state);
		}
	} else {
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}
/*
 * Misc helper functions:
 */
/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(&new_fpu->state);
}
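/*
 * Illustrative sketch of how the two stages fit together in the context
 * switch code (the real call sites are __switch_to() in
 * arch/x86/kernel/process_32.c and process_64.c; variable names here are
 * only for illustration):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	... switch stacks, segments, TLS, etc ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */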
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}
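/*
 * Illustrative pattern (a rough sketch of what the signal-restore path
 * does): activate the registers and then immediately restore the saved
 * user state into them. Per the NOTE above, user_fpu_begin() itself saves
 * and restores nothing:
 *
 *	user_fpu_begin();
 *	if (copy_user_to_fxregs(buf))
 *		... the restore faulted, handle it ...
 */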
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000
static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}
static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
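/*
 * Example (illustrative only): reading the set of xfeatures currently
 * enabled in XCR0, and enabling an additional feature bit, roughly the
 * way fpu__init_cpu_xstate() programs XCR0:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | XFEATURE_MASK_SSE);
 */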
#endif /* _ASM_X86_FPU_INTERNAL_H */