/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

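/*
 * For reference: kasan_mem_to_shadow() maps each KASAN_SHADOW_SCALE_SIZE
 * (8-byte) granule of kernel memory to one shadow byte, roughly:
 *
 *	shadow = ((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *
 * so poisoning 'size' bytes of memory only writes about size / 8 shadow
 * bytes.
 */
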
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		*shadow = size & KASAN_SHADOW_MASK;
	}
}

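/*
 * Worked example (illustrative): unpoisoning 13 bytes at an 8-byte-aligned
 * address clears one whole shadow byte for bytes 0..7, then writes
 * 13 & KASAN_SHADOW_MASK == 5 to the next shadow byte, marking only the
 * first 5 bytes of the trailing granule as accessible.
 */
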
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;

	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;

		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

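/*
 * Shadow byte encoding, for reference: 0 means the whole 8-byte granule is
 * accessible, a value N in 1..7 means only the first N bytes are, and a
 * negative value marks a redzone or freed memory. E.g. with shadow == 5,
 * an access at granule offset 4 is fine (4 < 5) while an access at offset
 * 5 is reported (5 >= 5).
 */
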
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

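/*
 * For scale (illustrative): checking a 512-byte access means scanning a
 * 64-byte shadow range, which the word loop above covers with eight u64
 * loads after aligning, rather than 64 single-byte loads.
 */
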
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}

	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

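/*
 * The definitions above replace the architecture's plain mem*() routines
 * with checking wrappers; __memset/__memmove/__memcpy are the unchecked
 * implementations the architecture provides for KASAN builds.
 */
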
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;

	return rz;
}

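/*
 * Worked example: a 100-byte object fails the first two tests
 * (100 > 48, 100 > 96) but satisfies 100 <= 512 - 64, so it gets a
 * 64-byte redzone; a 16-byte object gets the minimum 16 bytes.
 */
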
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

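/*
 * Resulting per-object layout (sketch):
 *
 *	| object | alloc meta | free meta (optional) | redzone padding |
 *
 * The metadata sits inside the per-object redzone, so the cache only
 * grows by whatever optimal_redzone() asks for beyond the metadata.
 */
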
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return true;
}

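/*
 * Note on the flow above: a freed object is not returned to the slab
 * allocator right away. It is poisoned as KASAN_KMALLOC_FREE and parked
 * in the quarantine, which delays reuse so that use-after-free accesses
 * keep hitting poisoned shadow for as long as possible.
 */
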
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

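/*
 * Worked example (illustrative): kmalloc(100, ...) served from a cache
 * with object_size == 128 unpoisons bytes 0..99 (the shadow byte for the
 * granule at offset 96 becomes 100 & 7 == 4) and poisons bytes 104..127
 * as KASAN_KMALLOC_REDZONE, so an access at object + 100 is caught by the
 * partial shadow byte and one at object + 110 by the redzone.
 */
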
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

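/*
 * Sizing example (illustrative): a 1 MB module mapping needs
 * (1048576 + 7) >> 3 == 131072 bytes of shadow, which round_up() keeps at
 * 32 pages with 4 KB pages; the shadow is vmalloc'ed zeroed, i.e. fully
 * unpoisoned, at the fixed address kasan_mem_to_shadow(addr).
 */
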
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

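/*
 * For reference, DEFINE_ASAN_LOAD_STORE(1) expands to roughly:
 *
 *	void __asan_load1(unsigned long addr)
 *	{
 *		check_memory_region_inline(addr, 1, false, _RET_IP_);
 *	}
 *
 * plus the matching __asan_store1() and the _noabort aliases that the
 * compiler emits calls to when instrumenting each 1-byte access.
 */
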
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif