/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KCSAN access checks and modifiers. These can be used to explicitly check
 * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE	(1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND	(1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC	(1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT	(1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED	(1 << 4) /* Access is a scoped access. */
/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be
 * used even in compilation units that selectively disable KCSAN, but must use
 * KCSAN to validate access to an address. Never use these in header files!
 */
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
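/*
 * Example (illustrative only; @addr is a hypothetical pointer): explicitly
 * check a plain 4-byte write performed by code that KCSAN cannot instrument:
 *
 *	__kcsan_check_access(addr, 4, KCSAN_ACCESS_WRITE);
 */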
/*
 * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
 * memory orders to the LKMM memory orders and vice-versa!
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb	__ATOMIC_SEQ_CST
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb	__ATOMIC_ACQ_REL
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb	__ATOMIC_ACQUIRE
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release	__ATOMIC_RELEASE
/**
 * __kcsan_mb - full memory barrier instrumentation
 */
void __kcsan_mb(void);

/**
 * __kcsan_wmb - write memory barrier instrumentation
 */
void __kcsan_wmb(void);

/**
 * __kcsan_rmb - read memory barrier instrumentation
 */
void __kcsan_rmb(void);

/**
 * __kcsan_release - release barrier instrumentation
 */
void __kcsan_release(void);
/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
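/*
 * Example (illustrative only; variable names are hypothetical): suppress
 * reports for a known-benign racy read in the current context:
 *
 *	kcsan_disable_current();
 *	snapshot = shared_stats_word;	// intentionally unmarked, racy read
 *	kcsan_enable_current();
 */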
/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);
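/*
 * Example (illustrative only; names are hypothetical): accesses inside the
 * region are treated as atomic, so expected races are not reported:
 *
 *	kcsan_nestable_atomic_begin();
 *	heuristic_counter++;	// racy by design, tolerated here
 *	kcsan_nestable_atomic_end();
 */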
/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * accesses.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
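/*
 * Example (illustrative only; names are hypothetical): only the next access
 * should be considered atomic:
 *
 *	kcsan_atomic_next(1);
 *	seen = obj->flag;	// this single access is treated as atomic
 */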
/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
	union {
		struct list_head list; /* scoped_accesses list */
		/*
		 * Not an entry in scoped_accesses list; stack depth from where
		 * the access was initialized.
		 */
		int stack_depth;
	};

	/* Access information. */
	const volatile void *ptr;
	size_t size;
	int type;
	/* Location where scoped access was set up. */
	unsigned long ip;
};
/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped                                                 \
	__maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
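/*
 * Example (illustrative only; @obj and do_something() are hypothetical): have
 * KCSAN watch obj->state for the duration of an operation, without relying on
 * the __kcsan_cleanup_scoped helper:
 *
 *	struct kcsan_scoped_access sa;
 *
 *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
 *				  KCSAN_ACCESS_SCOPED, &sa);
 *	do_something(obj);	// racing writers to obj->state are reported
 *	kcsan_end_scoped_access(&sa);
 */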
#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
					int type) { }

static inline void __kcsan_mb(void)			{ }
static inline void __kcsan_wmb(void)			{ }
static inline void __kcsan_rmb(void)			{ }
static inline void __kcsan_release(void)		{ }
static inline void kcsan_disable_current(void)		{ }
static inline void kcsan_enable_current(void)		{ }
static inline void kcsan_enable_current_nowarn(void)	{ }
static inline void kcsan_nestable_atomic_begin(void)	{ }
static inline void kcsan_nestable_atomic_end(void)	{ }
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */
#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif /* __SANITIZE_THREAD__ */
#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
/*
 * Normal barrier instrumentation is not done via explicit calls, but by mapping
 * to a repurposed __atomic_signal_fence(), which normally does not generate any
 * real instructions, but is still intercepted by fsanitize=thread. This means,
 * like any other compile-time instrumentation, barrier instrumentation can be
 * disabled with the __no_kcsan function attribute.
 *
 * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 *
 * These are all macros, like <asm/barrier.h>, since some architectures use them
 * in non-static inline functions.
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name)					\
	do {									\
		barrier();							\
		__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name);	\
		barrier();							\
	} while (0)
#define kcsan_mb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
#define kcsan_wmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
#define kcsan_rmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
#define kcsan_release()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
#define kcsan_mb	__kcsan_mb
#define kcsan_wmb	__kcsan_wmb
#define kcsan_rmb	__kcsan_rmb
#define kcsan_release	__kcsan_release
#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
#define kcsan_mb()	do { } while (0)
#define kcsan_wmb()	do { } while (0)
#define kcsan_rmb()	do { } while (0)
#define kcsan_release()	do { } while (0)
#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
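/*
 * Example (illustrative only): architecture barrier wrappers typically pair
 * the real barrier with its KCSAN instrumentation, roughly along the lines of:
 *
 *	#define mb()	do { kcsan_mb(); __mb(); } while (0)
 */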
/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size)                                    \
	__kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size)                                      \
	kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
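/*
 * Example (illustrative only; the helper name is hypothetical): explicitly
 * check a buffer that is about to be written by uninstrumented code:
 *
 *	kcsan_check_write(buf, len);
 *	do_uninstrumented_write(buf, len);
 */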
/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)		do { } while (0)
#define kcsan_check_atomic_write(...)		do { } while (0)
#define kcsan_check_atomic_read_write(...)	do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)					\
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)					\
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size)				\
	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *		WRITE_ONCE(shared_foo, ...);
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void reader(void) {
 *		// update_foo_lock does not need to be held!
 *		... = READ_ONCE(shared_foo);
 *	}
 *
 * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
 * checking if a clear scope where no concurrent writes are expected exists.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
/*
 * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
	struct kcsan_scoped_access __kcsan_scoped_name(id, _)                  \
		__kcsan_cleanup_scoped;                                        \
	struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)          \
		__maybe_unused = kcsan_begin_scoped_access(                    \
			&(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),     \
			&__kcsan_scoped_name(id, _))
/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *	void writer(void) {
 *		spin_lock(&update_foo_lock);
 *		{
 *			ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *			WRITE_ONCE(shared_foo, 42);
 *			...
 *			// shared_foo should still be 42 here!
 *		}
 *		spin_unlock(&update_foo_lock);
 *	}
 *	void buggy(void) {
 *		if (READ_ONCE(shared_foo) == 42)
 *			WRITE_ONCE(shared_foo, 1); // bug!
 *	}
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 *
 * Note:
 *
 * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
 *    checking if a clear scope where no concurrent accesses are expected exists.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
	__ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
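/*
 * Example (illustrative only; @obj and the helpers are hypothetical): cover
 * the entire teardown path rather than a single point in it:
 *
 *	if (refcount_dec_and_test(&obj->refcnt)) {
 *		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
 *		do_some_cleanup(obj);
 *		release_for_reuse(obj);
 *	}
 */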
/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables, where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, we can write the above simply
 * as:
 *
 * .. code-block:: c
 *
 *	ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *	foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example, where this may be used, is when certain bits of @var may
 * only be modified when holding the appropriate lock, but other bits may still
 * be modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *	spin_lock(&foo_lock);
 *	ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *	old_flags = flags;
 *	new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *	if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *	spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
	do {                                                                   \
		kcsan_set_access_mask(mask);                                   \
		__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
		kcsan_set_access_mask(0);                                      \
		kcsan_atomic_next(1);                                          \
	} while (0)
#endif /* _LINUX_KCSAN_CHECKS_H */