GNU Linux-libre 6.9-gnu
mm/kasan/tags.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common tag-based KASAN code.
 *
 * Copyright (c) 2018 Google, Inc.
 * Copyright (c) 2020 Google, Inc.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
#include <linux/static_key.h>
#include <linux/string.h>
#include <linux/types.h>

#include "kasan.h"
#include "../slab.h"

#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)

enum kasan_arg_stacktrace {
	KASAN_ARG_STACKTRACE_DEFAULT,
	KASAN_ARG_STACKTRACE_OFF,
	KASAN_ARG_STACKTRACE_ON,
};

static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;

/* Whether to collect alloc/free stack traces. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

/* Non-zero, as initial pointer values are 0. */
#define STACK_RING_BUSY_PTR ((void *)1)

struct kasan_stack_ring stack_ring = {
	.lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
};

/* kasan.stacktrace=off/on */
static int __init early_kasan_flag_stacktrace(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "off"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
	else if (!strcmp(arg, "on"))
		kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

/* kasan.stack_ring_size=<number of entries> */
static int __init early_kasan_flag_stack_ring_size(char *arg)
{
	if (!arg)
		return -EINVAL;

	return kstrtoul(arg, 0, &stack_ring.size);
}
early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
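/*
 * Illustrative usage (editorial note, not part of the original file):
 * "kasan.stack_ring_size=65536" on the command line requests a 65536-entry
 * ring. The value counts entries, not bytes; when the parameter is absent,
 * kasan_init_tags() falls back to KASAN_STACK_RING_SIZE_DEFAULT (32K entries).
 */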

void __init kasan_init_tags(void)
{
	switch (kasan_arg_stacktrace) {
	case KASAN_ARG_STACKTRACE_DEFAULT:
		/* Default is specified by kasan_flag_stacktrace definition. */
		break;
	case KASAN_ARG_STACKTRACE_OFF:
		static_branch_disable(&kasan_flag_stacktrace);
		break;
	case KASAN_ARG_STACKTRACE_ON:
		static_branch_enable(&kasan_flag_stacktrace);
		break;
	}

	if (kasan_stack_collection_enabled()) {
		if (!stack_ring.size)
			stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
		stack_ring.entries = memblock_alloc(
			sizeof(stack_ring.entries[0]) * stack_ring.size,
			SMP_CACHE_BYTES);
		if (WARN_ON(!stack_ring.entries))
			static_branch_disable(&kasan_flag_stacktrace);
	}
}
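/*
 * Editorial note (not part of the original file): the ring is carved out of
 * memblock once at boot and is never resized or freed. Indexing in
 * save_stack_info() uses "pos % stack_ring.size", so the configured size need
 * not be a power of two; if the allocation fails, stack trace collection is
 * simply disabled rather than failing the boot.
 */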

static void save_stack_info(struct kmem_cache *cache, void *object,
			gfp_t gfp_flags, bool is_free)
{
	unsigned long flags;
	depot_stack_handle_t stack, old_stack;
	u64 pos;
	struct kasan_stack_ring_entry *entry;
	void *old_ptr;

	stack = kasan_save_stack(gfp_flags,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);

	/*
	 * Prevent save_stack_info() from modifying stack ring
	 * when kasan_complete_mode_report_info() is walking it.
	 */
	read_lock_irqsave(&stack_ring.lock, flags);

next:
	pos = atomic64_fetch_add(1, &stack_ring.pos);
	entry = &stack_ring.entries[pos % stack_ring.size];

	/* Detect stack ring entry slots that are being written to. */
	old_ptr = READ_ONCE(entry->ptr);
	if (old_ptr == STACK_RING_BUSY_PTR)
		goto next; /* Busy slot. */
	if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
		goto next; /* Busy slot. */

	old_stack = entry->track.stack;

	entry->size = cache->object_size;
	kasan_set_track(&entry->track, stack);
	entry->is_free = is_free;

	entry->ptr = object;

	read_unlock_irqrestore(&stack_ring.lock, flags);

	if (old_stack)
		stack_depot_put(old_stack);
}
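/*
 * Editorial note (not part of the original file): the rwlock is used in an
 * inverted fashion. Concurrent producers all take it for reading, so they do
 * not exclude each other; per-slot exclusion comes from the try_cmpxchg() that
 * parks STACK_RING_BUSY_PTR in entry->ptr until the entry is fully written.
 * The report side is expected to take the lock for writing to get a stable
 * view of the ring. Saving the trace with STACK_DEPOT_FLAG_GET takes a stack
 * depot reference; the reference held by the evicted entry is dropped via
 * stack_depot_put(old_stack).
 */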

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	save_stack_info(cache, object, flags, false);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	save_stack_info(cache, object, 0, true);
}
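/*
 * Editorial note (not part of the original file): these two wrappers are the
 * entry points used by the rest of KASAN; both funnel into save_stack_info(),
 * with is_free distinguishing allocation records from free records in the
 * shared ring. The allocation path forwards the caller's GFP flags so the
 * stack depot may allocate if needed, while the free path passes 0.
 */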