/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})
/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
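
/*
 * Usage sketch (hypothetical names): annotate the branch that is expected
 * to be taken almost always, e.g. an error check on a hot path:
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *	if (likely(len > 0))
 *		consume(buf, len);
 *
 * The hint only influences code placement and branch prediction; both
 * branches must still be correct.
 */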
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
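
/*
 * Minimal sketch of why barrier_data() exists (cf. memzero_explicit()):
 * with a plain barrier() the compiler may still drop a memset() of a stack
 * buffer that is dead afterwards; passing the buffer address into the
 * barrier forces the stores to happen. 'key' is a hypothetical local buffer:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */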
/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
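
/*
 * Usage sketch (hypothetical 'state' handling): tell the compiler and
 * objtool that control cannot fall out of an exhaustive switch:
 *
 *	switch (state) {
 *	case STATE_A: return handle_a();
 *	case STATE_B: return handle_b();
 *	}
 *	unreachable();
 */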
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
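
/*
 * Usage sketch (hypothetical symbol): keep an entry point alive even though
 * nothing references it by name from C code:
 *
 *	void vec_irq_entry(void);
 *	KENTRY(vec_irq_entry);
 */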
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)
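
/*
 * Usage sketch (hypothetical ROM_SIG_ADDR): absolute_pointer() forms a
 * pointer to a fixed, well-known address without letting the compiler
 * reason about (and warn on) the underlying integer arithmetic:
 *
 *	memcpy(sig, absolute_pointer(ROM_SIG_ADDR), sizeof(sig));
 */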
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
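
/*
 * Usage sketch (hypothetical variable 'a'): prevent the optimizer from
 * tracking the value of a variable, e.g. so that expressions involving it
 * are not folded away in constant-time comparison code:
 *
 *	OPTIMIZER_HIDE_VAR(a);
 *	return a != b;
 */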
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
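
/*
 * Usage sketch (hypothetical macro): give each expansion its own
 * identifier so helper variables declared by a macro don't collide:
 *
 *	#define DEFINE_FLAG(name) static bool __UNIQUE_ID(name)
 *
 * The fallback above is only "not-quite-unique": two expansions on the
 * same source line still produce the same name.
 */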
#include <uapi/linux/types.h>
#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}
/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro doing the copy of the variable '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}
#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
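
/*
 * Usage sketch (hypothetical 'stop' flag shared with an interrupt handler):
 * marking both sides keeps the compiler from caching the flag in a register
 * or from tearing/duplicating the accesses:
 *
 *	WRITE_ONCE(dev->stop, 1);		(IRQ handler)
 *
 *	while (!READ_ONCE(dev->stop))		(task context)
 *		cpu_relax();
 */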
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)
/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
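
/*
 * Usage sketch (hypothetical lock->owner): force a single real load of a
 * scalar through a volatile lvalue; new code should prefer READ_ONCE():
 *
 *	struct task_struct *owner = ACCESS_ONCE(lock->owner);
 */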
/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
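
/*
 * Usage sketch (hypothetical refcounted 'global_cfg'): load the pointer
 * once, with dependency ordering, before dereferencing it:
 *
 *	struct cfg *c = lockless_dereference(global_cfg);
 *	if (c)
 *		use(c->val);
 */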
/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */