GNU Linux-libre 4.9.318-gnu1
arch/x86/include/asm/barrier.h
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

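/*
 * Illustrative sketch only (not part of the original header), assuming a
 * made-up descriptor layout and helper name: the kind of device-facing
 * ordering the comment above is about.  The payload stores must be
 * visible to the device before the "ready" flag it polls is set, hence
 * the wmb() between them.
 */
struct example_desc {
	unsigned long addr;
	unsigned long len;
	unsigned long ready;		/* polled by the device */
};

static inline void example_post_desc(struct example_desc *d,
				     unsigned long addr, unsigned long len)
{
	d->addr = addr;
	d->len = len;
	wmb();				/* payload before the ready flag */
	d->ready = 1;
}
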
/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *      bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

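/*
 * Illustrative sketch only, assuming a made-up lookup table and helper
 * name: how the mask is typically consumed.  In the real kernel this
 * pattern is wrapped by array_index_nospec() in linux/nospec.h; the mask
 * forces the index to 0 if a mispredicted bounds check lets the load
 * below execute speculatively.
 */
static inline unsigned long example_table_lookup(const unsigned long *table,
						 unsigned long nr_entries,
						 unsigned long idx)
{
	if (idx >= nr_entries)
		return 0;
	idx &= array_index_mask_nospec(idx, nr_entries);
	return table[idx];
}
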
/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)

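/*
 * Illustrative sketch only, with a made-up helper name and flag array:
 * barrier_nospec() is typically placed between a bounds/access check and
 * a dependent load, so the load cannot be issued speculatively with an
 * unchecked index.
 */
static inline int example_read_flag(const unsigned char *flags,
				    unsigned long nr, unsigned long idx)
{
	if (idx >= nr)
		return -1;
	barrier_nospec();		/* no speculation past the check */
	return flags[idx];
}
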
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

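/*
 * Illustrative sketch only, assuming a made-up descriptor shared with a
 * DMA-capable device: dma_wmb() publishes the payload before the status
 * word is flipped, and dma_rmb() on the reader side keeps the payload
 * load after the status load.  READ_ONCE()/WRITE_ONCE() come from
 * linux/compiler.h.
 */
struct example_dma_desc {
	unsigned int status;		/* "done"/"owned" word */
	unsigned int data;
};

static inline void example_dma_publish(struct example_dma_desc *desc,
				       unsigned int data)
{
	WRITE_ONCE(desc->data, data);
	dma_wmb();			/* data before status */
	WRITE_ONCE(desc->status, 1);
}

static inline unsigned int example_dma_consume(struct example_dma_desc *desc)
{
	if (!READ_ONCE(desc->status))
		return 0;
	dma_rmb();			/* status before data */
	return READ_ONCE(desc->data);
}
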
#define __smp_mb()	mb()
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif

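/*
 * Illustrative sketch only, with made-up helpers and shared variables:
 * the usual release/acquire pairing.  Real code uses the
 * smp_store_release()/smp_load_acquire() wrappers from
 * asm-generic/barrier.h, which expand to the __smp_* definitions above;
 * on TSO x86 both sides reduce to plain accesses plus compiler barriers.
 */
static int example_payload;
static int example_flag;

static inline void example_publish(int value)
{
	example_payload = value;
	__smp_store_release(&example_flag, 1);	/* payload before flag */
}

static inline int example_consume(void)
{
	if (!__smp_load_acquire(&example_flag))
		return -1;
	return example_payload;			/* flag before payload */
}
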
/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
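
/*
 * Illustrative sketch only: roughly how the APIC timer code pairs this
 * fence with arming the TSC deadline timer.  wrmsrl() and
 * MSR_IA32_TSC_DEADLINE live in <asm/msr.h>/<asm/msr-index.h>, which this
 * header does not include; the helper name is made up for the example.
 */
static inline void example_arm_tsc_deadline(unsigned long long tsc_deadline)
{
	weak_wrmsr_fence();		/* prior stores visible before WRMSR */
	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc_deadline);
}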

#endif /* _ASM_X86_BARRIER_H */