/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and by making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
static struct kmem_cache *bad_cache;
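
/*
 * Illustrative sketch, not itself one of the tests: a compile-time
 * constant length makes check_object_size() a no-op (the copy is
 * assumed to have been validated at compile time), so the tests below
 * add the always-zero "unconst" to force the runtime check. With
 * hypothetical ptr/buf:
 *
 *	copy_to_user(ptr, buf, sizeof(buf));           - no runtime check
 *	copy_to_user(ptr, buf, unconst + sizeof(buf)); - runtime-checked
 */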

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
        return stack + unconst;
}

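/*
 * Fill a buffer in a callee's stack frame and leak a pointer into the
 * middle of it: by the time the caller uses the pointer, this frame is
 * dead, so hardened usercopy should refuse to copy through it.
 */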
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
        unsigned char buf[128];
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(buf); i++) {
                buf[i] = value & 0xff;
        }

        /*
         * Put the target buffer in the middle of stack allocation
         * so that we don't step on future stack users regardless
         * of stack growth direction.
         */
        return trick_compiler(&buf[(128/2)-32]);
}

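/*
 * Exercise hardened usercopy's stack checks.
 * @to_user: test copy_to_user() from the stack rather than
 *	copy_from_user() onto it.
 * @bad_frame: use a pointer into a dead callee frame; otherwise use a
 *	pointer near the top of the thread stack so the copy runs off
 *	the end of it.
 */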
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
        unsigned long user_addr;
        unsigned char good_stack[32];
        unsigned char *bad_stack;
        int i;

        /* Exercise stack to avoid everything living in registers. */
        for (i = 0; i < sizeof(good_stack); i++)
                good_stack[i] = test_text[i % sizeof(test_text)];

        /* This is a pointer to outside our current stack frame. */
        if (bad_frame) {
                bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
        } else {
                /* Put start address just inside stack. */
                bad_stack = task_stack_page(current) + THREAD_SIZE;
                bad_stack -= sizeof(unsigned long);
        }

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
        pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
        pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
        pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
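        /*
         * On failure, vm_mmap() returns a negative errno value which,
         * as an unsigned long, lies above TASK_SIZE, hence this check.
         */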
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        if (to_user) {
                pr_info("attempting good copy_to_user of local stack\n");
                if (copy_to_user((void __user *)user_addr, good_stack,
                                 unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of distant stack\n");
                if (copy_to_user((void __user *)user_addr, bad_stack,
                                 unconst + sizeof(good_stack))) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                /*
                 * There isn't a safe way to not be protected by usercopy
                 * if we're going to write to another thread's stack.
                 */
                if (!bad_frame)
                        goto free_user;

                pr_info("attempting good copy_from_user of local stack\n");
                if (copy_from_user(good_stack, (void __user *)user_addr,
                                   unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of distant stack\n");
                if (copy_from_user(bad_stack, (void __user *)user_addr,
                                   unconst + sizeof(good_stack))) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

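/*
 * Exercise hardened usercopy's heap object size check: a copy of
 * exactly the allocation size should succeed, while a copy of twice
 * the allocation size should trip the check and Oops.
 */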
static void do_usercopy_heap_size(bool to_user)
{
        unsigned long user_addr;
        unsigned char *one, *two;
        size_t size = unconst + 1024;

        one = kmalloc(size, GFP_KERNEL);
        two = kmalloc(size, GFP_KERNEL);
        if (!one || !two) {
                pr_warn("Failed to allocate kernel memory\n");
                goto free_kernel;
        }

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_kernel;
        }

        memset(one, 'A', size);
        memset(two, 'B', size);

        if (to_user) {
                pr_info("attempting good copy_to_user of correct size\n");
                if (copy_to_user((void __user *)user_addr, one, size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user of too large size\n");
                if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user of correct size\n");
                if (copy_from_user(one, (void __user *)user_addr, size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user of too large size\n");
                if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
        kfree(one);
        kfree(two);
}

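/*
 * Exercise slab cache whitelisting (on kernels that support the
 * SLAB_USERCOPY flag): copies from a whitelisted kmalloc cache should
 * pass, while copies from the unflagged "bad_cache" should trip the
 * check.
 */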
static void do_usercopy_heap_flag(bool to_user)
{
        unsigned long user_addr;
        unsigned char *good_buf = NULL;
        unsigned char *bad_buf = NULL;

        /* Make sure cache was prepared. */
        if (!bad_cache) {
                pr_warn("Failed to allocate kernel cache\n");
                return;
        }

        /*
         * Allocate one buffer from each cache (kmalloc will have the
         * SLAB_USERCOPY flag already, but "bad_cache" won't).
         */
        good_buf = kmalloc(cache_size, GFP_KERNEL);
        bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
        if (!good_buf || !bad_buf) {
                pr_warn("Failed to allocate buffers from caches\n");
                goto free_alloc;
        }

        /* Allocate user memory we'll poke at. */
        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                goto free_alloc;
        }

        memset(good_buf, 'A', cache_size);
        memset(bad_buf, 'B', cache_size);

        if (to_user) {
                pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, good_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
                if (copy_to_user((void __user *)user_addr, bad_buf,
                                 cache_size)) {
                        pr_warn("copy_to_user failed, but lacked Oops\n");
                        goto free_user;
                }
        } else {
                pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
                if (copy_from_user(good_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed unexpectedly?!\n");
                        goto free_user;
                }

                pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
                if (copy_from_user(bad_buf, (void __user *)user_addr,
                                   cache_size)) {
                        pr_warn("copy_from_user failed, but lacked Oops\n");
                        goto free_user;
                }
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
        if (bad_buf)
                kmem_cache_free(bad_cache, bad_buf);
        kfree(good_buf);
}

/* Callable tests. */
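/*
 * Each is typically triggered from userspace through lkdtm's debugfs
 * interface, e.g.:
 *	echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 */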
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
        do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
        do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
        do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
        do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
        do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
        do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
        do_usercopy_stack(true, false);
}

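/*
 * Exercise the kernel text/rodata checks: copying from rodata
 * (test_text) should succeed, while exposing kernel text (the body of
 * vm_mmap()) should trip the check and Oops.
 */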
void lkdtm_USERCOPY_KERNEL(void)
{
        unsigned long user_addr;

        user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
                            PROT_READ | PROT_WRITE | PROT_EXEC,
                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
        if (user_addr >= TASK_SIZE) {
                pr_warn("Failed to allocate user memory\n");
                return;
        }

        pr_info("attempting good copy_to_user from kernel rodata\n");
        if (copy_to_user((void __user *)user_addr, test_text,
                         unconst + sizeof(test_text))) {
                pr_warn("copy_to_user failed unexpectedly?!\n");
                goto free_user;
        }

        pr_info("attempting bad copy_to_user from kernel text\n");
        if (copy_to_user((void __user *)user_addr, vm_mmap,
                         unconst + PAGE_SIZE)) {
                pr_warn("copy_to_user failed, but lacked Oops\n");
                goto free_user;
        }

free_user:
        vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
        /* Prepare cache that lacks SLAB_USERCOPY flag. */
        bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
                                      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
        kmem_cache_destroy(bad_cache);
}