1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
8 #define pr_fmt(fmt) "kasan: test: " fmt
10 #include <kunit/test.h>
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
14 #include <linux/kasan.h>
15 #include <linux/kernel.h>
16 #include <linux/mempool.h>
18 #include <linux/mman.h>
19 #include <linux/module.h>
20 #include <linux/printk.h>
21 #include <linux/random.h>
22 #include <linux/set_memory.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25 #include <linux/tracepoint.h>
26 #include <linux/uaccess.h>
27 #include <linux/vmalloc.h>
28 #include <trace/events/printk.h>
34 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
36 static bool multishot;
38 /* Fields set based on lines observed in the console. */
/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
51 /* Probe for console output: obtains test_status lines of interest. */
52 static void probe_console(void *ignore, const char *buf, size_t len)
54 if (strnstr(buf, "BUG: KASAN: ", len))
55 WRITE_ONCE(test_status.report_found, true);
56 else if (strnstr(buf, "Asynchronous fault: ", len))
57 WRITE_ONCE(test_status.async_fault, true);
60 static int kasan_suite_init(struct kunit_suite *suite)
62 if (!kasan_enabled()) {
63 pr_err("Can't run KASAN tests with KASAN disabled");
67 /* Stop failing KUnit tests on KASAN reports. */
68 kasan_kunit_test_suite_start();
71 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
72 * report the first detected bug and panic the kernel if panic_on_warn
75 multishot = kasan_save_enable_multi_shot();
77 register_trace_console(probe_console, NULL);
81 static void kasan_suite_exit(struct kunit_suite *suite)
83 kasan_kunit_test_suite_end();
84 kasan_restore_multi_shot(multishot);
85 unregister_trace_console(probe_console, NULL);
86 tracepoint_synchronize_unregister();
89 static void kasan_test_exit(struct kunit *test)
91 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
95 * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a
96 * KASAN report; causes a KUnit test failure otherwise.
98 * @test: Currently executing KUnit test.
99 * @expression: Expression that must produce a KASAN report.
101 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
102 * checking is auto-disabled. When this happens, this test handler reenables
103 * tag checking. As tag checking can be only disabled or enabled per CPU,
104 * this handler disables migration (preemption).
106 * Since the compiler doesn't see that the expression can change the test_status
107 * fields, it can reorder or optimize away the accesses to those fields.
108 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
109 * expression to prevent that.
111 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
112 * as false. This allows detecting KASAN reports that happen outside of the
113 * checks by asserting !test_status.report_found at the start of
114 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	/* Drop expected report state so the next check starts clean. */\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)
/* Skip the current test unless the given config option is enabled. */
#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

/* Skip the current test unless the given config option is disabled. */
#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)

/* Skip the current test unless mem*() calls are checked by KASAN. */
#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		break;  /* No compiler instrumentation. */		\
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
		break;  /* Should always be instrumented! */		\
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
		kunit_skip((test), "Test requires checked mem*()");	\
} while (0)
161 static void kmalloc_oob_right(struct kunit *test)
164 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
166 ptr = kmalloc(size, GFP_KERNEL);
167 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
169 OPTIMIZER_HIDE_VAR(ptr);
171 * An unaligned access past the requested kmalloc size.
172 * Only generic KASAN can precisely detect these.
174 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
175 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
178 * An aligned access into the first out-of-bounds granule that falls
179 * within the aligned kmalloc object.
181 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
183 /* Out-of-bounds access past the aligned kmalloc object. */
184 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
185 ptr[size + KASAN_GRANULE_SIZE + 5]);
190 static void kmalloc_oob_left(struct kunit *test)
195 ptr = kmalloc(size, GFP_KERNEL);
196 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
198 OPTIMIZER_HIDE_VAR(ptr);
199 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
203 static void kmalloc_node_oob_right(struct kunit *test)
208 ptr = kmalloc_node(size, GFP_KERNEL, 0);
209 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
211 OPTIMIZER_HIDE_VAR(ptr);
212 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
217 * Check that KASAN detects an out-of-bounds access for a big object allocated
218 * via kmalloc(). But not as big as to trigger the page_alloc fallback.
220 static void kmalloc_big_oob_right(struct kunit *test)
223 size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
225 ptr = kmalloc(size, GFP_KERNEL);
226 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
228 OPTIMIZER_HIDE_VAR(ptr);
229 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
234 * The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
235 * that does not fit into the largest slab cache and therefore is allocated via
236 * the page_alloc fallback.
239 static void kmalloc_large_oob_right(struct kunit *test)
242 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
244 ptr = kmalloc(size, GFP_KERNEL);
245 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
247 OPTIMIZER_HIDE_VAR(ptr);
248 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
253 static void kmalloc_large_uaf(struct kunit *test)
256 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
258 ptr = kmalloc(size, GFP_KERNEL);
259 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
262 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
265 static void kmalloc_large_invalid_free(struct kunit *test)
268 size_t size = KMALLOC_MAX_CACHE_SIZE + 10;
270 ptr = kmalloc(size, GFP_KERNEL);
271 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
273 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
276 static void page_alloc_oob_right(struct kunit *test)
281 size_t size = (1UL << (PAGE_SHIFT + order));
284 * With generic KASAN page allocations have no redzones, thus
285 * out-of-bounds detection is not guaranteed.
286 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
288 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
290 pages = alloc_pages(GFP_KERNEL, order);
291 ptr = page_address(pages);
292 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
294 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
295 free_pages((unsigned long)ptr, order);
298 static void page_alloc_uaf(struct kunit *test)
304 pages = alloc_pages(GFP_KERNEL, order);
305 ptr = page_address(pages);
306 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
307 free_pages((unsigned long)ptr, order);
309 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
312 static void krealloc_more_oob_helper(struct kunit *test,
313 size_t size1, size_t size2)
318 KUNIT_ASSERT_LT(test, size1, size2);
319 middle = size1 + (size2 - size1) / 2;
321 ptr1 = kmalloc(size1, GFP_KERNEL);
322 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
324 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
325 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
327 /* Suppress -Warray-bounds warnings. */
328 OPTIMIZER_HIDE_VAR(ptr2);
330 /* All offsets up to size2 must be accessible. */
331 ptr2[size1 - 1] = 'x';
334 ptr2[size2 - 1] = 'x';
336 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
337 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
338 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
340 /* For all modes first aligned offset after size2 must be inaccessible. */
341 KUNIT_EXPECT_KASAN_FAIL(test,
342 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
347 static void krealloc_less_oob_helper(struct kunit *test,
348 size_t size1, size_t size2)
353 KUNIT_ASSERT_LT(test, size2, size1);
354 middle = size2 + (size1 - size2) / 2;
356 ptr1 = kmalloc(size1, GFP_KERNEL);
357 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
359 ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
360 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
362 /* Suppress -Warray-bounds warnings. */
363 OPTIMIZER_HIDE_VAR(ptr2);
365 /* Must be accessible for all modes. */
366 ptr2[size2 - 1] = 'x';
368 /* Generic mode is precise, so unaligned size2 must be inaccessible. */
369 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
370 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');
372 /* For all modes first aligned offset after size2 must be inaccessible. */
373 KUNIT_EXPECT_KASAN_FAIL(test,
374 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');
377 * For all modes all size2, middle, and size1 should land in separate
378 * granules and thus the latter two offsets should be inaccessible.
380 KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
381 round_down(middle, KASAN_GRANULE_SIZE));
382 KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
383 round_down(size1, KASAN_GRANULE_SIZE));
384 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
385 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
386 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');
391 static void krealloc_more_oob(struct kunit *test)
393 krealloc_more_oob_helper(test, 201, 235);
396 static void krealloc_less_oob(struct kunit *test)
398 krealloc_less_oob_helper(test, 235, 201);
401 static void krealloc_large_more_oob(struct kunit *test)
403 krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
404 KMALLOC_MAX_CACHE_SIZE + 235);
407 static void krealloc_large_less_oob(struct kunit *test)
409 krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
410 KMALLOC_MAX_CACHE_SIZE + 201);
414 * Check that krealloc() detects a use-after-free, returns NULL,
415 * and doesn't unpoison the freed object.
417 static void krealloc_uaf(struct kunit *test)
423 ptr1 = kmalloc(size1, GFP_KERNEL);
424 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
427 KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
428 KUNIT_ASSERT_NULL(test, ptr2);
429 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
432 static void kmalloc_oob_16(struct kunit *test)
438 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
440 /* This test is specifically crafted for the generic mode. */
441 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
443 /* RELOC_HIDE to prevent gcc from warning about short alloc */
444 ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
445 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
447 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
448 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
450 OPTIMIZER_HIDE_VAR(ptr1);
451 OPTIMIZER_HIDE_VAR(ptr2);
452 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
457 static void kmalloc_uaf_16(struct kunit *test)
463 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
465 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
466 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
468 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
469 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
472 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
477 * Note: in the memset tests below, the written range touches both valid and
478 * invalid memory. This makes sure that the instrumentation does not only check
479 * the starting address but the whole range.
482 static void kmalloc_oob_memset_2(struct kunit *test)
485 size_t size = 128 - KASAN_GRANULE_SIZE;
486 size_t memset_size = 2;
488 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
490 ptr = kmalloc(size, GFP_KERNEL);
491 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
493 OPTIMIZER_HIDE_VAR(ptr);
494 OPTIMIZER_HIDE_VAR(size);
495 OPTIMIZER_HIDE_VAR(memset_size);
496 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, memset_size));
500 static void kmalloc_oob_memset_4(struct kunit *test)
503 size_t size = 128 - KASAN_GRANULE_SIZE;
504 size_t memset_size = 4;
506 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
508 ptr = kmalloc(size, GFP_KERNEL);
509 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
511 OPTIMIZER_HIDE_VAR(ptr);
512 OPTIMIZER_HIDE_VAR(size);
513 OPTIMIZER_HIDE_VAR(memset_size);
514 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, memset_size));
518 static void kmalloc_oob_memset_8(struct kunit *test)
521 size_t size = 128 - KASAN_GRANULE_SIZE;
522 size_t memset_size = 8;
524 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
526 ptr = kmalloc(size, GFP_KERNEL);
527 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
529 OPTIMIZER_HIDE_VAR(ptr);
530 OPTIMIZER_HIDE_VAR(size);
531 OPTIMIZER_HIDE_VAR(memset_size);
532 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, memset_size));
536 static void kmalloc_oob_memset_16(struct kunit *test)
539 size_t size = 128 - KASAN_GRANULE_SIZE;
540 size_t memset_size = 16;
542 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
544 ptr = kmalloc(size, GFP_KERNEL);
545 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
547 OPTIMIZER_HIDE_VAR(ptr);
548 OPTIMIZER_HIDE_VAR(size);
549 OPTIMIZER_HIDE_VAR(memset_size);
550 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, memset_size));
554 static void kmalloc_oob_in_memset(struct kunit *test)
557 size_t size = 128 - KASAN_GRANULE_SIZE;
559 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
561 ptr = kmalloc(size, GFP_KERNEL);
562 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
564 OPTIMIZER_HIDE_VAR(ptr);
565 OPTIMIZER_HIDE_VAR(size);
566 KUNIT_EXPECT_KASAN_FAIL(test,
567 memset(ptr, 0, size + KASAN_GRANULE_SIZE));
571 static void kmalloc_memmove_negative_size(struct kunit *test)
575 size_t invalid_size = -2;
577 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
580 * Hardware tag-based mode doesn't check memmove for negative size.
581 * As a result, this test introduces a side-effect memory corruption,
582 * which can result in a crash.
584 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
586 ptr = kmalloc(size, GFP_KERNEL);
587 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
589 memset((char *)ptr, 0, 64);
590 OPTIMIZER_HIDE_VAR(ptr);
591 OPTIMIZER_HIDE_VAR(invalid_size);
592 KUNIT_EXPECT_KASAN_FAIL(test,
593 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
597 static void kmalloc_memmove_invalid_size(struct kunit *test)
601 size_t invalid_size = size;
603 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
605 ptr = kmalloc(size, GFP_KERNEL);
606 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
608 memset((char *)ptr, 0, 64);
609 OPTIMIZER_HIDE_VAR(ptr);
610 OPTIMIZER_HIDE_VAR(invalid_size);
611 KUNIT_EXPECT_KASAN_FAIL(test,
612 memmove((char *)ptr, (char *)ptr + 4, invalid_size));
616 static void kmalloc_uaf(struct kunit *test)
621 ptr = kmalloc(size, GFP_KERNEL);
622 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
625 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
628 static void kmalloc_uaf_memset(struct kunit *test)
633 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
636 * Only generic KASAN uses quarantine, which is required to avoid a
637 * kernel memory corruption this test causes.
639 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
641 ptr = kmalloc(size, GFP_KERNEL);
642 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
645 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
648 static void kmalloc_uaf2(struct kunit *test)
655 ptr1 = kmalloc(size, GFP_KERNEL);
656 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
660 ptr2 = kmalloc(size, GFP_KERNEL);
661 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
664 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
665 * Allow up to 16 attempts at generating different tags.
667 if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
672 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
673 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
679 * Check that KASAN detects use-after-free when another object was allocated in
680 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
682 static void kmalloc_uaf3(struct kunit *test)
687 /* This test is specifically crafted for tag-based modes. */
688 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
690 ptr1 = kmalloc(size, GFP_KERNEL);
691 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
694 ptr2 = kmalloc(size, GFP_KERNEL);
695 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
698 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
/* Exercise every atomic API against an unsafe (redzone) address. */
static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
{
	int *i_unsafe = unsafe;

	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));

	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
}
759 static void kasan_atomics(struct kunit *test)
764 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
765 * that the following 16 bytes will make up the redzone.
767 a1 = kzalloc(48, GFP_KERNEL);
768 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
769 a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
770 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
772 /* Use atomics to access the redzone. */
773 kasan_atomics_helper(test, a1 + 48, a2);
779 static void kmalloc_double_kzfree(struct kunit *test)
784 ptr = kmalloc(size, GFP_KERNEL);
785 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
787 kfree_sensitive(ptr);
788 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
791 /* Check that ksize() does NOT unpoison whole object. */
792 static void ksize_unpoisons_memory(struct kunit *test)
795 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
798 ptr = kmalloc(size, GFP_KERNEL);
799 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
801 real_size = ksize(ptr);
802 KUNIT_EXPECT_GT(test, real_size, size);
804 OPTIMIZER_HIDE_VAR(ptr);
806 /* These accesses shouldn't trigger a KASAN report. */
810 /* These must trigger a KASAN report. */
811 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
812 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
813 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
814 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);
820 * Check that a use-after-free is detected by ksize() and via normal accesses
823 static void ksize_uaf(struct kunit *test)
826 int size = 128 - KASAN_GRANULE_SIZE;
828 ptr = kmalloc(size, GFP_KERNEL);
829 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
832 OPTIMIZER_HIDE_VAR(ptr);
833 KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
834 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
835 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
839 * The two tests below check that Generic KASAN prints auxiliary stack traces
840 * for RCU callbacks and workqueues. The reports need to be inspected manually.
842 * These tests are still enabled for other KASAN modes to make sure that all
843 * modes report bad accesses in tested scenarios.
846 static struct kasan_rcu_info {
851 static void rcu_uaf_reclaim(struct rcu_head *rp)
853 struct kasan_rcu_info *fp =
854 container_of(rp, struct kasan_rcu_info, rcu);
857 ((volatile struct kasan_rcu_info *)fp)->i;
860 static void rcu_uaf(struct kunit *test)
862 struct kasan_rcu_info *ptr;
864 ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
865 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
867 global_rcu_ptr = rcu_dereference_protected(
868 (struct kasan_rcu_info __rcu *)ptr, NULL);
870 KUNIT_EXPECT_KASAN_FAIL(test,
871 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
875 static void workqueue_uaf_work(struct work_struct *work)
880 static void workqueue_uaf(struct kunit *test)
882 struct workqueue_struct *workqueue;
883 struct work_struct *work;
885 workqueue = create_workqueue("kasan_workqueue_test");
886 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);
888 work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
889 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);
891 INIT_WORK(work, workqueue_uaf_work);
892 queue_work(workqueue, work);
893 destroy_workqueue(workqueue);
895 KUNIT_EXPECT_KASAN_FAIL(test,
896 ((volatile struct work_struct *)work)->data);
899 static void kfree_via_page(struct kunit *test)
904 unsigned long offset;
906 ptr = kmalloc(size, GFP_KERNEL);
907 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
909 page = virt_to_page(ptr);
910 offset = offset_in_page(ptr);
911 kfree(page_address(page) + offset);
914 static void kfree_via_phys(struct kunit *test)
920 ptr = kmalloc(size, GFP_KERNEL);
921 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
923 phys = virt_to_phys(ptr);
924 kfree(phys_to_virt(phys));
927 static void kmem_cache_oob(struct kunit *test)
931 struct kmem_cache *cache;
933 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
934 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
936 p = kmem_cache_alloc(cache, GFP_KERNEL);
938 kunit_err(test, "Allocation failed: %s\n", __func__);
939 kmem_cache_destroy(cache);
943 KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
945 kmem_cache_free(cache, p);
946 kmem_cache_destroy(cache);
949 static void kmem_cache_double_free(struct kunit *test)
953 struct kmem_cache *cache;
955 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
956 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
958 p = kmem_cache_alloc(cache, GFP_KERNEL);
960 kunit_err(test, "Allocation failed: %s\n", __func__);
961 kmem_cache_destroy(cache);
965 kmem_cache_free(cache, p);
966 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
967 kmem_cache_destroy(cache);
970 static void kmem_cache_invalid_free(struct kunit *test)
974 struct kmem_cache *cache;
976 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
978 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
980 p = kmem_cache_alloc(cache, GFP_KERNEL);
982 kunit_err(test, "Allocation failed: %s\n", __func__);
983 kmem_cache_destroy(cache);
987 /* Trigger invalid free, the object doesn't get freed. */
988 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));
991 * Properly free the object to prevent the "Objects remaining in
992 * test_cache on __kmem_cache_shutdown" BUG failure.
994 kmem_cache_free(cache, p);
996 kmem_cache_destroy(cache);
/* No-op constructor; its only purpose is to prevent cache merging. */
static void empty_cache_ctor(void *object) { }

/* Double destroy of a kmem_cache. */
static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}
1012 static void kmem_cache_accounted(struct kunit *test)
1017 struct kmem_cache *cache;
1019 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
1020 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1023 * Several allocations with a delay to allow for lazy per memcg kmem
1026 for (i = 0; i < 5; i++) {
1027 p = kmem_cache_alloc(cache, GFP_KERNEL);
1031 kmem_cache_free(cache, p);
1036 kmem_cache_destroy(cache);
1039 static void kmem_cache_bulk(struct kunit *test)
1041 struct kmem_cache *cache;
1047 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1048 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1050 ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
1052 kunit_err(test, "Allocation failed: %s\n", __func__);
1053 kmem_cache_destroy(cache);
1057 for (i = 0; i < ARRAY_SIZE(p); i++)
1058 p[i][0] = p[i][size - 1] = 42;
1060 kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
1061 kmem_cache_destroy(cache);
1064 static void *mempool_prepare_kmalloc(struct kunit *test, mempool_t *pool, size_t size)
1070 memset(pool, 0, sizeof(*pool));
1071 ret = mempool_init_kmalloc_pool(pool, pool_size, size);
1072 KUNIT_ASSERT_EQ(test, ret, 0);
1075 * Allocate one element to prevent mempool from freeing elements to the
1076 * underlying allocator and instead make it add them to the element
1077 * list when the tests trigger double-free and invalid-free bugs.
1078 * This allows testing KASAN annotations in add_element().
1080 elem = mempool_alloc_preallocated(pool);
1081 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1086 static struct kmem_cache *mempool_prepare_slab(struct kunit *test, mempool_t *pool, size_t size)
1088 struct kmem_cache *cache;
1092 cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
1093 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
1095 memset(pool, 0, sizeof(*pool));
1096 ret = mempool_init_slab_pool(pool, pool_size, cache);
1097 KUNIT_ASSERT_EQ(test, ret, 0);
1100 * Do not allocate one preallocated element, as we skip the double-free
1101 * and invalid-free tests for slab mempool for simplicity.
1107 static void *mempool_prepare_page(struct kunit *test, mempool_t *pool, int order)
1113 memset(pool, 0, sizeof(*pool));
1114 ret = mempool_init_page_pool(pool, pool_size, order);
1115 KUNIT_ASSERT_EQ(test, ret, 0);
1117 elem = mempool_alloc_preallocated(pool);
1118 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1123 static void mempool_oob_right_helper(struct kunit *test, mempool_t *pool, size_t size)
1127 elem = mempool_alloc_preallocated(pool);
1128 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1130 OPTIMIZER_HIDE_VAR(elem);
1132 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1133 KUNIT_EXPECT_KASAN_FAIL(test,
1134 ((volatile char *)&elem[size])[0]);
1136 KUNIT_EXPECT_KASAN_FAIL(test,
1137 ((volatile char *)&elem[round_up(size, KASAN_GRANULE_SIZE)])[0]);
1139 mempool_free(elem, pool);
1142 static void mempool_kmalloc_oob_right(struct kunit *test)
1145 size_t size = 128 - KASAN_GRANULE_SIZE - 5;
1148 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1150 mempool_oob_right_helper(test, &pool, size);
1152 mempool_free(extra_elem, &pool);
1153 mempool_exit(&pool);
1156 static void mempool_kmalloc_large_oob_right(struct kunit *test)
1159 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1162 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1164 mempool_oob_right_helper(test, &pool, size);
1166 mempool_free(extra_elem, &pool);
1167 mempool_exit(&pool);
1170 static void mempool_slab_oob_right(struct kunit *test)
1174 struct kmem_cache *cache;
1176 cache = mempool_prepare_slab(test, &pool, size);
1178 mempool_oob_right_helper(test, &pool, size);
1180 mempool_exit(&pool);
1181 kmem_cache_destroy(cache);
1185 * Skip the out-of-bounds test for page mempool. With Generic KASAN, page
1186 * allocations have no redzones, and thus the out-of-bounds detection is not
1187 * guaranteed; see https://bugzilla.kernel.org/show_bug.cgi?id=210503. With
1188 * the tag-based KASAN modes, the neighboring allocation might have the same
1189 * tag; see https://bugzilla.kernel.org/show_bug.cgi?id=203505.
1192 static void mempool_uaf_helper(struct kunit *test, mempool_t *pool, bool page)
1196 elem = mempool_alloc_preallocated(pool);
1197 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1199 mempool_free(elem, pool);
1201 ptr = page ? page_address((struct page *)elem) : elem;
1202 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
1205 static void mempool_kmalloc_uaf(struct kunit *test)
1211 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1213 mempool_uaf_helper(test, &pool, false);
1215 mempool_free(extra_elem, &pool);
1216 mempool_exit(&pool);
1219 static void mempool_kmalloc_large_uaf(struct kunit *test)
1222 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1225 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1227 mempool_uaf_helper(test, &pool, false);
1229 mempool_free(extra_elem, &pool);
1230 mempool_exit(&pool);
1233 static void mempool_slab_uaf(struct kunit *test)
1237 struct kmem_cache *cache;
1239 cache = mempool_prepare_slab(test, &pool, size);
1241 mempool_uaf_helper(test, &pool, false);
1243 mempool_exit(&pool);
1244 kmem_cache_destroy(cache);
1247 static void mempool_page_alloc_uaf(struct kunit *test)
1253 extra_elem = mempool_prepare_page(test, &pool, order);
1255 mempool_uaf_helper(test, &pool, true);
1257 mempool_free(extra_elem, &pool);
1258 mempool_exit(&pool);
1261 static void mempool_double_free_helper(struct kunit *test, mempool_t *pool)
1265 elem = mempool_alloc_preallocated(pool);
1266 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1268 mempool_free(elem, pool);
1270 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem, pool));
1273 static void mempool_kmalloc_double_free(struct kunit *test)
1279 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1281 mempool_double_free_helper(test, &pool);
1283 mempool_free(extra_elem, &pool);
1284 mempool_exit(&pool);
1287 static void mempool_kmalloc_large_double_free(struct kunit *test)
1290 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1293 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1295 mempool_double_free_helper(test, &pool);
1297 mempool_free(extra_elem, &pool);
1298 mempool_exit(&pool);
1301 static void mempool_page_alloc_double_free(struct kunit *test)
1307 extra_elem = mempool_prepare_page(test, &pool, order);
1309 mempool_double_free_helper(test, &pool);
1311 mempool_free(extra_elem, &pool);
1312 mempool_exit(&pool);
1315 static void mempool_kmalloc_invalid_free_helper(struct kunit *test, mempool_t *pool)
1319 elem = mempool_alloc_preallocated(pool);
1320 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elem);
1322 KUNIT_EXPECT_KASAN_FAIL(test, mempool_free(elem + 1, pool));
1324 mempool_free(elem, pool);
1327 static void mempool_kmalloc_invalid_free(struct kunit *test)
1333 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1335 mempool_kmalloc_invalid_free_helper(test, &pool);
1337 mempool_free(extra_elem, &pool);
1338 mempool_exit(&pool);
1341 static void mempool_kmalloc_large_invalid_free(struct kunit *test)
1344 size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
1347 extra_elem = mempool_prepare_kmalloc(test, &pool, size);
1349 mempool_kmalloc_invalid_free_helper(test, &pool);
1351 mempool_free(extra_elem, &pool);
1352 mempool_exit(&pool);
/*
 * Skip the invalid-free test for page mempool. The invalid-free detection only
 * works for compound pages and mempool preallocates all page elements without
 * the __GFP_COMP flag.
 */
/* File-scope array targeted by the global out-of-bounds tests below. */
static char global_array[10];
1363 static void kasan_global_oob_right(struct kunit *test)
1366 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
1367 * from failing here and panicking the kernel, access the array via a
1368 * volatile pointer, which will prevent the compiler from being able to
1369 * determine the array bounds.
1371 * This access uses a volatile pointer to char (char *volatile) rather
1372 * than the more conventional pointer to volatile char (volatile char *)
1373 * because we want to prevent the compiler from making inferences about
1374 * the pointer itself (i.e. its array bounds), not the data that it
1377 char *volatile array = global_array;
1378 char *p = &array[ARRAY_SIZE(global_array) + 3];
1380 /* Only generic mode instruments globals. */
1381 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1383 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1386 static void kasan_global_oob_left(struct kunit *test)
1388 char *volatile array = global_array;
1389 char *p = array - 3;
1392 * GCC is known to fail this test, skip it.
1393 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
1395 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
1396 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1397 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1400 static void kasan_stack_oob(struct kunit *test)
1402 char stack_array[10];
1403 /* See comment in kasan_global_oob_right. */
1404 char *volatile array = stack_array;
1405 char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
1407 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1409 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1412 static void kasan_alloca_oob_left(struct kunit *test)
1414 volatile int i = 10;
1415 char alloca_array[i];
1416 /* See comment in kasan_global_oob_right. */
1417 char *volatile array = alloca_array;
1418 char *p = array - 1;
1420 /* Only generic mode instruments dynamic allocas. */
1421 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1422 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1424 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1427 static void kasan_alloca_oob_right(struct kunit *test)
1429 volatile int i = 10;
1430 char alloca_array[i];
1431 /* See comment in kasan_global_oob_right. */
1432 char *volatile array = alloca_array;
1433 char *p = array + i;
1435 /* Only generic mode instruments dynamic allocas. */
1436 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1437 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
1439 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
1442 static void kasan_memchr(struct kunit *test)
1448 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1449 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1451 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1454 size = round_up(size, OOB_TAG_OFF);
1456 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1457 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1459 OPTIMIZER_HIDE_VAR(ptr);
1460 OPTIMIZER_HIDE_VAR(size);
1461 KUNIT_EXPECT_KASAN_FAIL(test,
1462 kasan_ptr_result = memchr(ptr, '1', size + 1));
1467 static void kasan_memcmp(struct kunit *test)
1474 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1475 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1477 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1480 size = round_up(size, OOB_TAG_OFF);
1482 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1483 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1484 memset(arr, 0, sizeof(arr));
1486 OPTIMIZER_HIDE_VAR(ptr);
1487 OPTIMIZER_HIDE_VAR(size);
1488 KUNIT_EXPECT_KASAN_FAIL(test,
1489 kasan_int_result = memcmp(ptr, arr, size+1));
1493 static void kasan_strings(struct kunit *test)
1499 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
1500 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
1502 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
1504 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
1505 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1510 * Try to cause only 1 invalid access (less spam in dmesg).
1511 * For that we need ptr to point to zeroed byte.
1512 * Skip metadata that could be stored in freed object so ptr
1513 * will likely point to zeroed byte.
1516 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
1518 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
1520 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
1522 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
1524 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
1526 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
/*
 * Apply each modifying bitop to bit @nr of @addr; callers choose nr/addr so
 * that every access is out of bounds and must produce a KASAN report.
 */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
1541 static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
1543 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
1544 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
1545 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
1546 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
1547 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
1548 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
1549 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
1550 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
1552 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
1553 xor_unlock_is_negative_byte(1 << nr, addr));
1556 static void kasan_bitops_generic(struct kunit *test)
1560 /* This test is specifically crafted for the generic mode. */
1561 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
1564 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
1565 * this way we do not actually corrupt other memory.
1567 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
1568 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1571 * Below calls try to access bit within allocated memory; however, the
1572 * below accesses are still out-of-bounds, since bitops are defined to
1573 * operate on the whole long the bit is in.
1575 kasan_bitops_modify(test, BITS_PER_LONG, bits);
1578 * Below calls try to access bit beyond allocated memory.
1580 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
1585 static void kasan_bitops_tags(struct kunit *test)
1589 /* This test is specifically crafted for tag-based modes. */
1590 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1592 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
1593 bits = kzalloc(48, GFP_KERNEL);
1594 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
1596 /* Do the accesses past the 48 allocated bytes, but within the redone. */
1597 kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
1598 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
1603 static void vmalloc_helpers_tags(struct kunit *test)
1607 /* This test is intended for tag-based modes. */
1608 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1610 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1612 if (!kasan_vmalloc_enabled())
1613 kunit_skip(test, "Test requires kasan.vmalloc=on");
1615 ptr = vmalloc(PAGE_SIZE);
1616 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1618 /* Check that the returned pointer is tagged. */
1619 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1620 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1622 /* Make sure exported vmalloc helpers handle tagged pointers. */
1623 KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
1624 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));
1626 #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
1630 /* Make sure vmalloc'ed memory permissions can be changed. */
1631 rv = set_memory_ro((unsigned long)ptr, 1);
1632 KUNIT_ASSERT_GE(test, rv, 0);
1633 rv = set_memory_rw((unsigned long)ptr, 1);
1634 KUNIT_ASSERT_GE(test, rv, 0);
1641 static void vmalloc_oob(struct kunit *test)
1643 char *v_ptr, *p_ptr;
1645 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;
1647 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1649 if (!kasan_vmalloc_enabled())
1650 kunit_skip(test, "Test requires kasan.vmalloc=on");
1652 v_ptr = vmalloc(size);
1653 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1655 OPTIMIZER_HIDE_VAR(v_ptr);
1658 * We have to be careful not to hit the guard page in vmalloc tests.
1659 * The MMU will catch that and crash us.
1662 /* Make sure in-bounds accesses are valid. */
1664 v_ptr[size - 1] = 0;
1667 * An unaligned access past the requested vmalloc size.
1668 * Only generic KASAN can precisely detect these.
1670 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1671 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);
1673 /* An aligned access into the first out-of-bounds granule. */
1674 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);
1676 /* Check that in-bounds accesses to the physical page are valid. */
1677 page = vmalloc_to_page(v_ptr);
1678 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1679 p_ptr = page_address(page);
1680 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1686 * We can't check for use-after-unmap bugs in this nor in the following
1687 * vmalloc tests, as the page might be fully unmapped and accessing it
1688 * will crash the kernel.
1692 static void vmap_tags(struct kunit *test)
1694 char *p_ptr, *v_ptr;
1695 struct page *p_page, *v_page;
1698 * This test is specifically crafted for the software tag-based mode,
1699 * the only tag-based mode that poisons vmap mappings.
1701 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1703 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
1705 if (!kasan_vmalloc_enabled())
1706 kunit_skip(test, "Test requires kasan.vmalloc=on");
1708 p_page = alloc_pages(GFP_KERNEL, 1);
1709 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
1710 p_ptr = page_address(p_page);
1711 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1713 v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
1714 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1717 * We can't check for out-of-bounds bugs in this nor in the following
1718 * vmalloc tests, as allocations have page granularity and accessing
1719 * the guard page will crash the kernel.
1722 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1723 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1725 /* Make sure that in-bounds accesses through both pointers work. */
1729 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
1730 v_page = vmalloc_to_page(v_ptr);
1731 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
1732 KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);
1735 free_pages((unsigned long)p_ptr, 1);
1738 static void vm_map_ram_tags(struct kunit *test)
1740 char *p_ptr, *v_ptr;
1744 * This test is specifically crafted for the software tag-based mode,
1745 * the only tag-based mode that poisons vm_map_ram mappings.
1747 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1749 page = alloc_pages(GFP_KERNEL, 1);
1750 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
1751 p_ptr = page_address(page);
1752 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
1754 v_ptr = vm_map_ram(&page, 1, -1);
1755 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);
1757 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
1758 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);
1760 /* Make sure that in-bounds accesses through both pointers work. */
1764 vm_unmap_ram(v_ptr, 1);
1765 free_pages((unsigned long)p_ptr, 1);
1768 static void vmalloc_percpu(struct kunit *test)
1774 * This test is specifically crafted for the software tag-based mode,
1775 * the only tag-based mode that poisons percpu mappings.
1777 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
1779 ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
1781 for_each_possible_cpu(cpu) {
1782 char *c_ptr = per_cpu_ptr(ptr, cpu);
1784 KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
1785 KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
1787 /* Make sure that in-bounds accesses don't crash the kernel. */
1795 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
1796 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
1799 static void match_all_not_assigned(struct kunit *test)
1805 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1807 for (i = 0; i < 256; i++) {
1808 size = get_random_u32_inclusive(1, 1024);
1809 ptr = kmalloc(size, GFP_KERNEL);
1810 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1811 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1812 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1816 for (i = 0; i < 256; i++) {
1817 order = get_random_u32_inclusive(1, 4);
1818 pages = alloc_pages(GFP_KERNEL, order);
1819 ptr = page_address(pages);
1820 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1821 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1822 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1823 free_pages((unsigned long)ptr, order);
1826 if (!kasan_vmalloc_enabled())
1829 for (i = 0; i < 256; i++) {
1830 size = get_random_u32_inclusive(1, 1024);
1831 ptr = vmalloc(size);
1832 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1833 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
1834 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1839 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
1840 static void match_all_ptr_tag(struct kunit *test)
1845 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1847 ptr = kmalloc(128, GFP_KERNEL);
1848 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1850 /* Backup the assigned tag. */
1852 KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
1854 /* Reset the tag to 0xff.*/
1855 ptr = set_tag(ptr, KASAN_TAG_KERNEL);
1857 /* This access shouldn't trigger a KASAN report. */
1860 /* Recover the pointer tag and free. */
1861 ptr = set_tag(ptr, tag);
1865 /* Check that there are no match-all memory tags for tag-based modes. */
1866 static void match_all_mem_tag(struct kunit *test)
1871 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
1873 ptr = kmalloc(128, GFP_KERNEL);
1874 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
1875 KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
1877 /* For each possible tag value not matching the pointer tag. */
1878 for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
1880 * For Software Tag-Based KASAN, skip the majority of tag
1881 * values to avoid the test printing too many reports.
1883 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
1884 tag >= KASAN_TAG_MIN + 8 && tag <= KASAN_TAG_KERNEL - 8)
1887 if (tag == get_tag(ptr))
1890 /* Mark the first memory granule with the chosen memory tag. */
1891 kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
1893 /* This access must cause a KASAN report. */
1894 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
1897 /* Recover the memory tag and free. */
1898 kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
1902 static struct kunit_case kasan_kunit_test_cases[] = {
1903 KUNIT_CASE(kmalloc_oob_right),
1904 KUNIT_CASE(kmalloc_oob_left),
1905 KUNIT_CASE(kmalloc_node_oob_right),
1906 KUNIT_CASE(kmalloc_big_oob_right),
1907 KUNIT_CASE(kmalloc_large_oob_right),
1908 KUNIT_CASE(kmalloc_large_uaf),
1909 KUNIT_CASE(kmalloc_large_invalid_free),
1910 KUNIT_CASE(page_alloc_oob_right),
1911 KUNIT_CASE(page_alloc_uaf),
1912 KUNIT_CASE(krealloc_more_oob),
1913 KUNIT_CASE(krealloc_less_oob),
1914 KUNIT_CASE(krealloc_large_more_oob),
1915 KUNIT_CASE(krealloc_large_less_oob),
1916 KUNIT_CASE(krealloc_uaf),
1917 KUNIT_CASE(kmalloc_oob_16),
1918 KUNIT_CASE(kmalloc_uaf_16),
1919 KUNIT_CASE(kmalloc_oob_in_memset),
1920 KUNIT_CASE(kmalloc_oob_memset_2),
1921 KUNIT_CASE(kmalloc_oob_memset_4),
1922 KUNIT_CASE(kmalloc_oob_memset_8),
1923 KUNIT_CASE(kmalloc_oob_memset_16),
1924 KUNIT_CASE(kmalloc_memmove_negative_size),
1925 KUNIT_CASE(kmalloc_memmove_invalid_size),
1926 KUNIT_CASE(kmalloc_uaf),
1927 KUNIT_CASE(kmalloc_uaf_memset),
1928 KUNIT_CASE(kmalloc_uaf2),
1929 KUNIT_CASE(kmalloc_uaf3),
1930 KUNIT_CASE(kmalloc_double_kzfree),
1931 KUNIT_CASE(ksize_unpoisons_memory),
1932 KUNIT_CASE(ksize_uaf),
1933 KUNIT_CASE(rcu_uaf),
1934 KUNIT_CASE(workqueue_uaf),
1935 KUNIT_CASE(kfree_via_page),
1936 KUNIT_CASE(kfree_via_phys),
1937 KUNIT_CASE(kmem_cache_oob),
1938 KUNIT_CASE(kmem_cache_double_free),
1939 KUNIT_CASE(kmem_cache_invalid_free),
1940 KUNIT_CASE(kmem_cache_double_destroy),
1941 KUNIT_CASE(kmem_cache_accounted),
1942 KUNIT_CASE(kmem_cache_bulk),
1943 KUNIT_CASE(mempool_kmalloc_oob_right),
1944 KUNIT_CASE(mempool_kmalloc_large_oob_right),
1945 KUNIT_CASE(mempool_slab_oob_right),
1946 KUNIT_CASE(mempool_kmalloc_uaf),
1947 KUNIT_CASE(mempool_kmalloc_large_uaf),
1948 KUNIT_CASE(mempool_slab_uaf),
1949 KUNIT_CASE(mempool_page_alloc_uaf),
1950 KUNIT_CASE(mempool_kmalloc_double_free),
1951 KUNIT_CASE(mempool_kmalloc_large_double_free),
1952 KUNIT_CASE(mempool_page_alloc_double_free),
1953 KUNIT_CASE(mempool_kmalloc_invalid_free),
1954 KUNIT_CASE(mempool_kmalloc_large_invalid_free),
1955 KUNIT_CASE(kasan_global_oob_right),
1956 KUNIT_CASE(kasan_global_oob_left),
1957 KUNIT_CASE(kasan_stack_oob),
1958 KUNIT_CASE(kasan_alloca_oob_left),
1959 KUNIT_CASE(kasan_alloca_oob_right),
1960 KUNIT_CASE(kasan_memchr),
1961 KUNIT_CASE(kasan_memcmp),
1962 KUNIT_CASE(kasan_strings),
1963 KUNIT_CASE(kasan_bitops_generic),
1964 KUNIT_CASE(kasan_bitops_tags),
1965 KUNIT_CASE(kasan_atomics),
1966 KUNIT_CASE(vmalloc_helpers_tags),
1967 KUNIT_CASE(vmalloc_oob),
1968 KUNIT_CASE(vmap_tags),
1969 KUNIT_CASE(vm_map_ram_tags),
1970 KUNIT_CASE(vmalloc_percpu),
1971 KUNIT_CASE(match_all_not_assigned),
1972 KUNIT_CASE(match_all_ptr_tag),
1973 KUNIT_CASE(match_all_mem_tag),
1977 static struct kunit_suite kasan_kunit_test_suite = {
1979 .test_cases = kasan_kunit_test_cases,
1980 .exit = kasan_test_exit,
1981 .suite_init = kasan_suite_init,
1982 .suite_exit = kasan_suite_exit,
1985 kunit_test_suite(kasan_kunit_test_suite);
1987 MODULE_LICENSE("GPL");