/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#include <asm/addrspace.h>

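/*
 * Data-dependency read barriers are only required on architectures
 * that can reorder dependent loads (historically DEC Alpha); on MIPS
 * they are no-ops.
 */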
#define read_barrier_depends()          do { } while (0)
#define smp_read_barrier_depends()      do { } while (0)

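/*
 * __sync() emits SYNC, a full memory barrier on MIPS. SYNC first
 * appeared in the MIPS II ISA, so ".set mips2" makes the assembler
 * accept it even when the kernel targets MIPS I. Without
 * CONFIG_CPU_HAS_SYNC this collapses to a no-op.
 */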
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()                                \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                ".set   mips2\n\t"              \
                "sync\n\t"                      \
                ".set   pop"                    \
                : /* no output */               \
                : /* no input */                \
                : "memory")
#else
#define __sync()        do { } while (0)
#endif

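/*
 * __fast_iob() forces pending writes out to the bus by doing an
 * uncached load from CKSEG1: the CPU has to drain its write buffer
 * before the uncached read can complete.
 */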
#define __fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "nop\n\t"                       \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)CKSEG1)          \
                : "memory")
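/*
 * Cavium Octeon reorders only its stores (through a write buffer);
 * reads are not reordered, so fast_rmb() can be a plain compiler
 * barrier. SYNCW commits buffered stores; it is emitted twice here,
 * which is understood to work around a core erratum on some Octeon
 * models.
 */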
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR       ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
# define __syncw()      __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")

# define fast_wmb()     __syncw()
# define fast_rmb()     barrier()
# define fast_mb()      __sync()
# define fast_iob()     do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
# define fast_wmb()     __sync()
# define fast_rmb()     __sync()
# define fast_mb()      __sync()
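/*
 * SGI IP28 (Indigo2 with an R10000) needs a heavier I/O barrier: an
 * uncached load, a SYNC, and a second uncached load. The fixed address
 * 0x1fa00004 is a platform register in uncached space, presumably
 * chosen because reading it is side-effect free.
 */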
# ifdef CONFIG_SGI_IP28
#  define fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "sync\n\t"                      \
                "lw     $0,%0\n\t"              \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \
                : "memory")
# else
#  define fast_iob()                            \
        do {                                    \
                __sync();                       \
                __fast_iob();                   \
        } while (0)
# endif
#endif /* CONFIG_CPU_CAVIUM_OCTEON */

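/*
 * CPUs with an explicit write buffer (CONFIG_CPU_HAS_WB, typically
 * older R3000-class systems) implement mb() and iob() by draining that
 * buffer with wbflush(); all other CPUs use the fast_*() variants
 * chosen above.
 */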
#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

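/*
 * Mandatory barriers: wmb(), rmb() and the dma_*() variants map to the
 * fast_*() implementations selected above and are needed even on !SMP
 * kernels, since they also order accesses as seen by devices.
 */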
#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define dma_wmb()       fast_wmb()
#define dma_rmb()       fast_rmb()

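/*
 * SMP barriers: weakly ordered SMP systems need a real SYNC (Octeon
 * gets by with less, see above); on strongly ordered or !SMP systems a
 * compiler barrier is sufficient.
 */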
#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
# ifdef CONFIG_CPU_CAVIUM_OCTEON
#  define smp_mb()      __sync()
#  define smp_rmb()     barrier()
#  define smp_wmb()     __syncw()
# else
#  define smp_mb()      __asm__ __volatile__("sync" : : : "memory")
#  define smp_rmb()     __asm__ __volatile__("sync" : : : "memory")
#  define smp_wmb()     __asm__ __volatile__("sync" : : : "memory")
# endif
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

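/*
 * Cores that can reorder memory accesses even around LL/SC sequences
 * (CONFIG_WEAK_REORDERING_BEYOND_LLSC, selected e.g. by Octeon) need a
 * SYNC after them. __WEAK_LLSC_MB is spliced into inline asm after
 * LL/SC loops; smp_llsc_mb() below emits the same barrier standalone.
 */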
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
#define __WEAK_LLSC_MB          "       sync    \n"
#else
#define __WEAK_LLSC_MB          "               \n"
#endif

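/*
 * smp_store_mb(): perform the store, then a full barrier, so the new
 * value is ordered before all subsequent memory accesses.
 */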
#define smp_store_mb(var, value) \
        do { WRITE_ONCE(var, value); smp_mb(); } while (0)

#define smp_llsc_mb()   __asm__ __volatile__(__WEAK_LLSC_MB : : : "memory")

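/*
 * Before an LL/SC sequence, Octeon only has to commit its buffered
 * stores (smp_wmb() expands to SYNCW); other weakly ordered cores need
 * the full smp_llsc_mb().
 */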
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
/* Cause previous writes to become visible on all CPUs as soon as possible */
#define nudge_writes() __asm__ __volatile__(".set push\n\t"             \
                                            ".set arch=octeon\n\t"      \
                                            "syncw\n\t"                 \
                                            ".set pop" : : : "memory")
#else
#define smp_mb__before_llsc() smp_llsc_mb()
#define nudge_writes() mb()
#endif

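/*
 * Store-release and load-acquire: this kernel builds both from a plain
 * access plus a full smp_mb(), rather than any lighter-weight ordering
 * primitive.
 */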
#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define smp_load_acquire(p)                                             \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        ___p1;                                                          \
})
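/*
 * Illustrative pairing (a sketch, not part of this header): the
 * release store orders the data write before the flag write, and the
 * acquire load orders the flag read before the data read, so CPU 1
 * cannot observe the flag without also observing the data.
 *
 *      CPU 0                           CPU 1
 *      data = 42;                      while (!smp_load_acquire(&flag))
 *      smp_store_release(&flag, 1);            cpu_relax();
 *                                      BUG_ON(data != 42);
 */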
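/*
 * smp_mb__before_atomic()/smp_mb__after_atomic() bracket LL/SC-based
 * atomic operations that do not otherwise imply a barrier, giving them
 * full-barrier semantics where callers require it.
 */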
#define smp_mb__before_atomic() smp_mb__before_llsc()
#define smp_mb__after_atomic()  smp_llsc_mb()

#endif /* __ASM_BARRIER_H */