#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

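/*
 * Illustrative example (not part of this header): kernel code that needs to
 * push a kernel buffer through a user-pointer interface has traditionally
 * used the save/override/restore pattern below.  The helper and buffer names
 * are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// bypass the address-limit check
 *	err = some_user_ptr_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);			// always restore the previous limit
 */
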
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr), (void)(type),	\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

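/*
 * Illustrative example (not part of this header): one access_ok() check can
 * cover several subsequent __get_user()/__put_user() calls on the same user
 * buffer.  The variable names are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	err  = __get_user(lo, &uptr[0]);
 *	err |= __get_user(hi, &uptr[1]);
 *	if (err)
 *		return -EFAULT;
 */
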
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

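/*
 * The inline assembly helpers below emit these pairs directly into the
 * __ex_table section: for example, PPC_LONG "1b,3b" records that a fault at
 * local label 1 (the user access) should resume at local label 3 (the
 * out-of-line fixup code that sets the error value to -EFAULT).
 */
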
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

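/*
 * Illustrative example (not part of this header): get_user() and put_user()
 * evaluate to 0 on success and -EFAULT on a faulting access, and the transfer
 * size is inferred from the pointer type.  Names are hypothetical.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)arg))	// checked 4-byte read
 *		return -EFAULT;
 *	val |= SOME_FLAG;
 *	if (put_user(val, (u32 __user *)arg))	// checked 4-byte write
 *		return -EFAULT;
 */
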
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */

#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

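/*
 * __put_user_size() brackets the raw store with allow_write_to_user()/
 * prevent_write_to_user(), i.e. it opens and closes the user-access window
 * itself.  __put_user_size_allowed() performs only the store and is intended
 * for callers that manage the window themselves (see the *_allowed and
 * unsafe_* wrappers below).
 */
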
#define __put_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __pu_err;							\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	if (!is_kernel_addr((unsigned long)__pu_addr))			\
		might_fault();						\
	__chk_user_ptr(__pu_addr);					\
	if (do_allow)							\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	else								\
		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size))		\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
	__pu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

#define __get_user_nocheck(x, ptr, size, do_allow)			\
({									\
	long __gu_err;							\
	unsigned long __gu_val;						\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__typeof__(size) __gu_size = (size);				\
									\
	__chk_user_ptr(__gu_addr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))			\
		might_fault();						\
	barrier_nospec();						\
	if (do_allow)							\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	else								\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);			\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) {		\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (likely(access_ok(VERIFY_READ, from, n))) {
		allow_user_access(to, from, n);
		barrier_nospec();
		ret = __copy_tofrom_user((__force void __user *)to, from, n);
		prevent_user_access(to, from, n);
		return ret;
	}
	memset(to, 0, n);
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

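/*
 * Illustrative example (not part of this header): both copy routines return
 * the number of bytes that could NOT be copied, so 0 means success.  The
 * structure and variable names are hypothetical.
 *
 *	struct foo_args kargs;
 *
 *	if (copy_from_user(&kargs, uargs, sizeof(kargs)))
 *		return -EFAULT;
 *	// ... operate on kargs ...
 *	if (copy_to_user(uargs, &kargs, sizeof(kargs)))
 *		return -EFAULT;
 */
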
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long ret;

	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	allow_write_to_user(to, n);
	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_write_to_user(to, n);
	return ret;
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_fault();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, size);
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;

	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#define user_access_begin()	do { } while (0)
#define user_access_end()	prevent_user_access(NULL, NULL, ~0ul)

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(__copy_to_user_inatomic(d, s, l), e)

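/*
 * Illustrative example (not part of this header): the unsafe_*() helpers skip
 * the per-access window management and branch to the supplied label on fault,
 * so they are designed to sit inside a user_access_begin()/user_access_end()
 * section.  Names are hypothetical.
 *
 *	user_access_begin();
 *	unsafe_put_user(a, &uptr->a, efault);
 *	unsafe_put_user(b, &uptr->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
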
#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */