arch/x86/include/asm/cmpxchg_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the CMPXCHG8B feature (X86_FEATURE_CX8) in
 *       boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here so that the
 * instruction executes atomically.  We need the reader side to see
 * a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
        u32 low  = value;
        u32 high = value >> 32;
        u64 prev = *ptr;

        asm volatile("\n1:\t"
                     LOCK_PREFIX "cmpxchg8b %0\n\t"
                     "jnz 1b"
                     : "=m" (*ptr), "+A" (prev)
                     : "b" (low), "c" (high)
                     : "memory");
}
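
/*
 * Illustrative usage sketch, not part of the original header: on
 * 32-bit x86 a plain 64-bit store compiles to two 32-bit moves, so a
 * concurrent reader can observe a torn value.  set_64bit() makes the
 * store single-copy atomic.  'example_publish_ns' is a hypothetical
 * name used only for this example.
 */
static inline void example_publish_ns(volatile u64 *slot, u64 ns)
{
        /* Readers never see a half-updated low/high word pair. */
        set_64bit(slot, ns);
}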

#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)                                       \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)                                 \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#define arch_try_cmpxchg64(ptr, po, n)                                  \
        __try_cmpxchg64((ptr), (unsigned long long *)(po), \
                        (unsigned long long)(n))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
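
/*
 * Illustrative sketch, not part of the original header: a lock-free
 * 64-bit increment built on __cmpxchg64().  The store succeeds only
 * if nobody raced with us; __cmpxchg64() returns the value it found,
 * so a mismatch means we must retry.  'example_inc64' is a
 * hypothetical name.
 */
static inline void example_inc64(volatile u64 *ctr)
{
        u64 old = *ctr, prev;

        while ((prev = __cmpxchg64(ctr, old, old + 1)) != old)
                old = prev;     /* lost the race, retry from the fresh value */
}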

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile("cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
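
/*
 * Note (added for clarity): the _local variant omits LOCK_PREFIX, so
 * the compare-and-exchange is atomic only with respect to the current
 * CPU (e.g. against interrupts on per-CPU data), not against other
 * CPUs.  Use the locked __cmpxchg64() for data shared across CPUs.
 */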

static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
{
        bool success;
        u64 old = *pold;
        asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
                     CC_SET(z)
                     : CC_OUT(z) (success),
                       [ptr] "+m" (*ptr),
                       "+A" (old)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32))
                     : "memory");

        if (unlikely(!success))
                *pold = old;
        return success;
}
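
/*
 * Illustrative sketch, not part of the original header: the try_
 * variant shines in retry loops because a failed attempt hands the
 * current memory value back through *pold, saving an explicit
 * re-read.  'example_add64' is a hypothetical name.
 */
static inline void example_add64(volatile u64 *ptr, u64 delta)
{
        u64 old = *ptr;

        /* On failure, 'old' already holds the value that beat us. */
        while (!__try_cmpxchg64(ptr, &old, old + delta))
                ;
}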

#ifndef CONFIG_X86_CMPXCHG64
/*
 * When building a kernel capable of running on the 80386 and 80486,
 * cmpxchg8b may have to be emulated, since those CPUs can lack the
 * instruction.
 */

#define arch_cmpxchg64(ptr, o, n)                               \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io(LOCK_PREFIX_HERE                         \
                        "call cmpxchg8b_emu",                   \
                        "lock; cmpxchg8b (%%esi)",              \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new>>32))          \
                       : "memory");                             \
        __ret; })

#define arch_cmpxchg64_local(ptr, o, n)                         \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io("call cmpxchg8b_emu",                    \
                       "cmpxchg8b (%%esi)",                     \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new>>32))          \
                       : "memory");                             \
        __ret; })

#endif
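
/*
 * Conceptual sketch (added for illustration) of what the out-of-line
 * cmpxchg8b_emu helper has to do when CMPXCHG8B is unavailable.  On
 * such CPUs the kernel runs uniprocessor-only, so disabling
 * interrupts is enough to make the sequence atomic; the real helper
 * is assembly (arch/x86/lib/cmpxchg8b_emu.S), and this C version is
 * a hypothetical equivalent:
 *
 *	u64 emulated_cmpxchg8b(volatile u64 *ptr, u64 old, u64 new)
 *	{
 *		unsigned long flags;
 *		u64 prev;
 *
 *		local_irq_save(flags);
 *		prev = *ptr;
 *		if (prev == old)
 *			*ptr = new;
 *		local_irq_restore(flags);
 *		return prev;
 *	}
 */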

#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
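
/*
 * Note (added for clarity): on 32-bit x86 a "double" width cmpxchg
 * operates on two machine words, i.e. 2 x 32 bits, which is exactly
 * what CMPXCHG8B provides; hence the X86_FEATURE_CX8 test above.
 */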

#endif /* _ASM_X86_CMPXCHG_32_H */