#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#define VERIFY_READ		0
#define VERIFY_WRITE		1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
#define set_fs(x)	(current->thread.addr_limit = (x))
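
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily lifting the address limit so a kernel buffer can be passed
 * to code that expects __user pointers. The names below are hypothetical;
 * the old limit must be restored on every exit path.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_uaccess_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */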

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
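
/*
 * Worked example of the overflow concern above (illustrative, not part of
 * this header): with addr == ULONG_MAX - 1 and size == 4, a naive
 * "addr + size <= limit" test wraps around to 2 and passes. The
 * constant-size branch rejects it because addr > limit - size, and the
 * variable-size branch catches the wraparound via "addr += size;
 * addr < size".
 */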

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
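
/*
 * Illustrative sketch (not part of this header): access_ok() only
 * validates the range, so the access itself can still fault and must be
 * checked too. "uptr" is a hypothetical "int __user *".
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */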

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
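
/*
 * Illustrative sketch (not part of this header): fetching one value with
 * full address checking. "uptr" is a hypothetical "int __user *".
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */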

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
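
/*
 * Illustrative sketch (not part of this header): writing one value back
 * to a hypothetical "int __user *uptr".
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */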

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)		(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__typeof__(*(ptr)) __pu_val;				\
	__pu_val = (x);						\
	__uaccess_begin();					\
	__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
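
/*
 * Illustrative sketch (not part of this header): after a single
 * access_ok() check, the unchecked variants avoid re-validating the
 * range on every iteration. "uarr", "vals" and "n" are hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(vals[i], uarr + i))
 *			return -EFAULT;
 */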

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
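
/*
 * Illustrative sketch (not part of this header), in the style of the
 * signal frame setup code: several *_ex() accesses share one fault
 * check. "uframe", "a" and "b" are hypothetical.
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(a, &uframe->a);
 *		put_user_ex(b, &uframe->b);
 *	} put_user_catch(err);
 *	if (err)
 *		return -EFAULT;
 */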

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
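
/*
 * Illustrative sketch (not part of this header): atomically replacing a
 * user-space word when it still holds "expected", as the futex code does
 * with pagefaults disabled. "uaddr", "expected" and "desired" are
 * hypothetical; a zero return means the cmpxchg executed, and "cur" then
 * holds the value that was found there.
 *
 *	u32 cur;
 *
 *	if (user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, desired))
 *		return -EFAULT;
 *	if (cur != expected)
 *		return -EAGAIN;
 */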

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned long n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned long n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	kasan_check_write(to, n);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		n = _copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	kasan_check_read(from, n);

	might_fault();

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = _copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
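
/*
 * Illustrative sketch (not part of this header): copy_from_user() returns
 * the number of bytes that could NOT be copied, so any nonzero result is
 * an error. "ubuf" is a hypothetical user pointer.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, ubuf, sizeof(karg)))
 *		return -EFAULT;
 */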

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	unsigned long __gu_val;							\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
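
/*
 * Illustrative sketch (not part of this header): the unsafe accessors
 * need an access_ok() check first and a user_access_begin()/end()
 * bracket; a fault branches to the supplied label. "uptr" and "val" are
 * hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_put_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */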

#endif /* _ASM_X86_UACCESS_H */