// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

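/*
 * Each struct page carries pointers to its shadow and origin counterparts;
 * the helpers below return the kernel virtual addresses of that metadata.
 */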
static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

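/*
 * Shadow and origin for vmalloc and module addresses live at fixed offsets
 * within dedicated metadata regions, so their addresses can be computed
 * directly from the accessed address.
 */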
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

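/* Return the struct page backing a valid kernel virtual address, or NULL. */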
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	return NULL;
}

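/*
 * Return shadow/origin pointers for a memory access of @size bytes at
 * @address. If no metadata is available, or KMSAN is disabled, the access is
 * redirected to the dummy pages above so that callers never dereference a
 * NULL metadata pointer.
 */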
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;
	void *ret;

	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

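/*
 * Copy the shadow and origin of @src to @dst. If @src has no metadata, the
 * destination page is marked as fully initialized instead.
 */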
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

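/*
 * Set up metadata for a newly allocated page: zero it for initialized
 * (__GFP_ZERO) allocations, otherwise mark the whole allocation as
 * uninitialized and record the allocation stack as its origin.
 */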
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

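/*
 * Poison the memory of a freed page so that later reads of it are reported
 * as uses of uninitialized memory.
 */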
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

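/*
 * Called when pages are mapped into a vmalloc'ed region: map their shadow
 * and origin pages at the corresponding offsets within the vmalloc metadata
 * regions, so that metadata accesses for the new mapping are valid.
 */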
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);

	if (!shadow || !origin)
		panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
		      __func__, size);

	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

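/*
 * Attach preallocated shadow and origin pages to each of the 1 << order
 * pages starting at @page.
 */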
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}