/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
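
/*
 * Illustrative sketch (not part of this header): a classic use of wmb()
 * when talking to a device is ordering a descriptor fill against the
 * store that hands the descriptor over, e.g. in a driver:
 *
 *	desc->addr = buf_dma;			// fill in the descriptor
 *	desc->len  = len;
 *	wmb();					// make the fills visible first
 *	desc->flags = DESC_OWNED_BY_DEVICE;	// then publish to the device
 *
 * The desc layout and DESC_OWNED_BY_DEVICE are made up for the example;
 * for coherent DMA memory the lighter dma_wmb() below is usually enough.
 */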

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
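
/*
 * Illustrative sketch (not part of this header): linux/nospec.h uses
 * this mask via array_index_nospec() to clamp a user-controlled index
 * under speculation, roughly:
 *
 *	if (index < size) {
 *		index = array_index_nospec(index, size);
 *		val = array[index];
 *	}
 *
 * The helper ANDs the index with the mask computed above, so a
 * mis-speculated out-of-bounds index is forced to 0 instead of
 * steering a dependent load.
 */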

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
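
/*
 * Usage sketch (illustrative; callers use the smp_*() wrappers that
 * asm-generic/barrier.h builds from the __smp_*() definitions above):
 *
 *	producer                          consumer
 *	WRITE_ONCE(data, 42);             if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);             val = READ_ONCE(data);
 *
 * If the consumer observes ready == 1, it is also guaranteed to
 * observe data == 42.
 */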

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
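
/*
 * Usage sketch (illustrative; the real callers live elsewhere, e.g. the
 * local APIC code): the fence is issued right before writing one of the
 * non-serializing MSRs named above, roughly:
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, deadline);
 */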

#endif /* _ASM_X86_BARRIER_H */