/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}
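
/*
 * Illustrative example (not part of this header): the classic pattern for
 * temporarily widening the address limit so a kernel buffer can be passed
 * through a __user-typed interface. some_helper() is a placeholder name.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);
 */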

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr), (void)(type),	\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
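
/*
 * Illustrative example (not part of this header): validate a user range
 * once, then use the unchecked (__-prefixed) accessors on it.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */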

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
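
/*
 * Illustrative example (not part of this header): get_user()/put_user()
 * return 0 on success and -EFAULT on a faulting access.
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */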

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)
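
/*
 * The "_allowed" variant above assumes the user access window is already
 * open; __put_user_size() below opens and closes it around the store via
 * the KUAP helpers allow_write_to_user()/prevent_write_to_user().
 */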
#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	if (!is_kernel_addr((unsigned long)__pu_addr))			\
		might_fault();						\
	__chk_user_ptr(__pu_addr);					\
	if (do_allow)							\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	else								\
		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size))		\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)				\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	__chk_user_ptr(__pu_addr);					\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
									\
	__pu_err;							\
})

extern long __get_user_bad(void);

/*
 * This does an atomic 128 byte aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
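
/*
 * Illustrative example (not part of this header): on 32-bit,
 * __long_type(*(u64 *)p) is unsigned long long because sizeof(u64) is
 * larger than sizeof(0UL) there; any register-sized value yields plain
 * unsigned long.
 */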

#define __get_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __gu_err;							\
	__long_type(*(ptr)) __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__typeof__(size) __gu_size = (size);				\
									\
	__chk_user_ptr(__gu_addr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))			\
		might_fault();						\
	barrier_nospec();						\
	if (do_allow)							\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	else								\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) {		\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)				\
({									\
	long __gu_err;							\
	__long_type(*(ptr)) __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__typeof__(size) __gu_size = (size);				\
									\
	__chk_user_ptr(__gu_addr);					\
	barrier_nospec();						\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
									\
	__gu_err;							\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_user_access(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_user_access(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}
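
/*
 * Illustrative example (not part of this header): the generic
 * copy_{to,from}_user() wrappers built on the raw_* hooks above return
 * the number of bytes that could not be copied, so 0 means success.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */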

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}

	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}
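
/*
 * Illustrative example (not part of this header): clear_user() likewise
 * returns the number of bytes left unzeroed, so 0 means success.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */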

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		size_t size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);

#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
#define user_access_end()		  prevent_user_access(NULL, NULL, ~0ul)

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
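
/*
 * Illustrative example (not part of this header): batching stores under a
 * single range check. user_access_begin() here expands to access_ok(), and
 * each unsafe_put_user() jumps to the given label on fault. The two-field
 * struct pointed to by uptr is hypothetical.
 *
 *	if (!user_access_begin(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr->a, efault);
 *	unsafe_put_user(b, &uptr->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */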

#endif	/* _ARCH_POWERPC_UACCESS_H */