#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif /* CONFIG_X86_32 */
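
/*
 * Note on the 32-bit fallback (added commentary): a LOCK-prefixed
 * read-modify-write such as "lock; addl $0,0(%%esp)" is a full memory
 * barrier on x86, so it substitutes for MFENCE on pre-SSE2 CPUs;
 * ALTERNATIVE() patches in the fence instruction at boot when
 * X86_FEATURE_XMM2 is present.
 *
 * Illustrative use of wmb() (a sketch; desc and doorbell are
 * hypothetical driver state, not part of this header):
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();			// publish the descriptor ...
 *	writel(1, doorbell);	// ... before kicking the device
 */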

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *	0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
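
/*
 * Illustrative usage (a sketch, not part of this header): callers
 * normally reach this through array_index_nospec() in <linux/nospec.h>,
 * which ANDs the index with the mask:
 *
 *	if (idx < ARRAY_SIZE(table)) {
 *		idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *		val = table[idx];
 *	}
 */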

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
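
/*
 * Note (added commentary): __smp_store_mb() relies on xchg() with a
 * memory operand being implicitly LOCK-prefixed, which makes the store
 * a full memory barrier in a single instruction:
 *
 *	__smp_store_mb(flag, 1);	// store + full barrier in one op
 */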

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option x86 doesn't provide its usual strong TSO memory
 * model, so we must fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif /* CONFIG_X86_PPRO_FENCE */
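
/*
 * Illustrative sketch (hypothetical data/ready pair, not part of the
 * original header): on TSO hardware the release store and acquire load
 * compile to plain MOVs plus compiler barriers, yet still provide the
 * usual message-passing guarantee:
 *
 *	// producer			// consumer
 *	data = compute();		if (__smp_load_acquire(&ready))
 *	__smp_store_release(&ready, 1);		consume(data);
 */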

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
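
/*
 * Illustrative usage (a sketch under the comment's assumptions): make
 * prior stores globally visible before arming the non-serializing
 * TSC-deadline timer:
 *
 *	weak_wrmsr_fence();
 *	wrmsr(MSR_IA32_TSC_DEADLINE, low, high);
 */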

#endif /* _ASM_X86_BARRIER_H */