/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
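
/*
 * Illustrative sketch (not part of the original header): each table
 * entry pairs the address of an instruction that may fault with the
 * address of its fixup code.  The accessor macros further down emit
 * exactly this pattern from inline assembly:
 *
 *	1:	ldrt	r0, [r1]		@ may fault on a bad address
 *	...
 *	.pushsection __ex_table, "a"
 *	.long	1b, 3b			@ a fault at 1b resumes at 3b
 *	.popsection
 */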
/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g., via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
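
/*
 * The pairing convention, as used by the accessors further down (e.g.
 * __arch_copy_from_user()):
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	... perform the userspace access ...
 *	uaccess_restore(__ua_flags);
 */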
/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);
/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
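
/*
 * Illustrative sketch (not from this header): the classic pattern for
 * temporarily lifting the address limit so the user accessors accept
 * kernel addresses, as older in-kernel callers did:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... access kernel memory through the user accessors ...
 *	set_fs(old_fs);
 */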
#define segment_eq(a, b)	((a) == (b))
/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
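
/*
 * For example (illustrative): on 32-bit ARM, __inttype(char) and
 * __inttype(int) are unsigned long, while __inttype(long long) is
 * unsigned long long.
 */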
/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subhss	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);
#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"
#define __get_user_x(__r2, __p, __e, __l, __s)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
/* narrowing a double-word get into a single 32-bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif
/*
 * Storing the result into the proper least significant word of a
 * 64-bit target variable; this differs only in the big-endian case,
 * where the LSW of the 64-bit __r2 pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif
#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})
#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	})
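
/*
 * Example use (illustrative driver-style code): get_user() selects the
 * right __get_user_N helper from the pointer type and returns 0 or
 * -EFAULT:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */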
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})
#else /* CONFIG_MMU */

/*
 * uClinux has only one address space, so it has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * accessors.
 */
#define __get_user(x, ptr) get_user(x, ptr)

#else
/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., don't return a value as such).
 */
#define __get_user(x, ptr)						\
	({								\
		long __gu_err = 0;					\
		__get_user_err((x), (ptr), __gu_err);			\
		__gu_err;						\
	})
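
/*
 * Example of the intended calling convention (illustrative, assumed
 * caller): validate the whole range once, then use the non-verifying
 * accessor inside the loop:
 *
 *	u32 tmp[N];
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(tmp)))
 *		return -EFAULT;
 *	for (i = 0; i < N; i++)
 *		if (__get_user(tmp[i], uptr + i))
 *			return -EFAULT;
 */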
#define __get_user_err(x, ptr, err)					\
	({								\
		unsigned long __gu_addr = (unsigned long)(ptr);		\
		unsigned long __gu_val;					\
		unsigned int __ua_flags;				\
		__chk_user_ptr(ptr);					\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err); break; \
		case 2:	__get_user_asm_half(__gu_val, __gu_addr, err); break; \
		case 4:	__get_user_asm_word(__gu_val, __gu_addr, err); break; \
		default: (__gu_val) = __get_user_bad();			\
		}							\
		uaccess_restore(__ua_flags);				\
		(x) = (__typeof__(*(ptr)))__gu_val;			\
	})
#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)
#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */
#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)

#endif /* !CONFIG_CPU_SPECTRE */
#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
									\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)
#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
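
/*
 * Example use (illustrative): put_user() mirrors get_user() for the
 * write direction:
 *
 *	if (put_user(status, (int __user *)arg))
 *		return -EFAULT;
 */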
#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)
#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword
#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)
#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */
#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define __reg_oper0	"%R2"
#define __reg_oper1	"%Q2"
#else
#define __reg_oper0	"%Q2"
#define __reg_oper1	"%R2"
#endif
#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"		) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}
extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}
#else
#define __arch_copy_from_user(to, from, n)	\
	(memcpy(to, (void __force *)from, n), 0)
#define __arch_copy_to_user(to, from, n)	\
	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	check_object_size(to, n, false);

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __arch_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
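
/*
 * Example use (illustrative): copy a structure in from userspace; the
 * return value is the number of bytes NOT copied, so zero means
 * success:
 *
 *	struct foo karg;	(hypothetical struct)
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */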
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);

	return __arch_copy_to_user(to, from, n);
}
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);

	if (access_ok(VERIFY_WRITE, to, n))
		n = __arch_copy_to_user(to, from, n);
	return n;
}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}
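
/*
 * Example use (illustrative): zero the tail of a partially written
 * user buffer:
 *
 *	if (clear_user(ubuf + written, len - written))
 *		return -EFAULT;
 */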
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
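
/*
 * Example use (illustrative): strncpy_from_user() returns the length
 * copied (excluding the NUL terminator), or -EFAULT on a fault:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */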
#endif /* _ASMARM_UACCESS_H */