/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/errno.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
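
/*
 * Illustrative sketch only (not part of this header): a caller that must
 * temporarily direct the user-access helpers at a kernel buffer typically
 * uses the save/override/restore pattern below.  The variable names are
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... use get_user()/put_user()/copy_*_user() on a kernel pointer ...
 *	set_fs(old_fs);
 */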

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n"	/* jump if addr+size>limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag));

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
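
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * ___range_ok() returns 0 when the whole range is usable, so access_ok()
 * is the boolean test most callers want before using the unchecked helpers:
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(u32)))
 *		return -EFAULT;
 *	__get_user(val, (u32 __user *) uptr);	// range already verified above
 */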

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern int fixup_exception(struct pt_regs *regs);
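
/*
 * Illustrative sketch only (labels hypothetical, not part of this header):
 * each faulting instruction below is paired with a fixup address by emitting
 * an __ex_table entry alongside it, roughly of this shape:
 *
 *	1:	mov	(a0),d0			# instruction allowed to fault
 *		...
 *	3:	...				# out-of-line fixup code
 *		.section __ex_table,"a"
 *		.balign	4
 *		.long	1b, 3b			# faulting insn, fixup address
 *		.previous
 *
 * On a fault, fixup_exception() searches this table for the faulting PC and,
 * if an entry is found, resumes execution at the recorded fixup address.
 */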

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
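
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * get_user()/put_user() validate the address on every call, whereas the
 * "__" variants rely on one up-front access_ok() covering all accesses:
 *
 *	u32 val;
 *	if (get_user(val, uptr))		// checked single fetch
 *		return -EFAULT;
 *
 *	if (access_ok(VERIFY_WRITE, uarr, 2 * sizeof(u32))) {
 *		__put_user(val, &uarr[0]);	// unchecked, range verified above
 *		__put_user(val + 1, &uarr[1]);
 *	}
 */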

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break; }					\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break; }					\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break; }					\
	default:						\
		__get_user_unknown(); break;			\
	}							\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	const __typeof__(*(ptr))* __guc_ptr = (ptr);			\
	int _e;								\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size))))	\
		_e = __get_user_nocheck((x), __guc_ptr, (size));	\
	else {								\
		_e = -EFAULT;						\
		(x) = (__typeof__(x))0;					\
	}								\
	_e;								\
})

#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:	mov"INSN"	%2,%1\n"		\
		"	mov	0,%0\n"				\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:	mov	%3,%0\n"			\
		"	jmp	2b\n"				\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign	4\n"				\
		"	.long	1b, 3b\n"			\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm(""  ); break;			\
	case 8:  __put_user_asm8();    break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	union {								\
		__typeof__(*(ptr)) val;					\
		u32 bits[2];						\
	} __pu_val;							\
	unsigned long __pu_addr;					\
	int __pu_err;							\
	__pu_val.val = (x);						\
	__pu_addr = (unsigned long) (ptr);				\
	if (likely(__access_ok(__pu_addr, size))) {			\
		switch (size) {						\
		case 1:  __put_user_asm("bu"); break;			\
		case 2:  __put_user_asm("hu"); break;			\
		case 4:  __put_user_asm(""  ); break;			\
		case 8:  __put_user_asm8();    break;			\
		default: __pu_err = __put_user_unknown(); break;	\
		}							\
	} else {							\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:	mov"INSN"	%1,%2\n"		\
		"	mov	0,%0\n"				\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:	mov	%3,%0\n"			\
		"	jmp	2b\n"				\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign	4\n"				\
		"	.long	1b, 3b\n"			\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT));				\
})

#define __put_user_asm8()						\
({									\
	asm volatile(							\
		"1:	mov	%1,%3\n"				\
		"2:	mov	%2,%4\n"				\
		"	mov	0,%0\n"					\
		"3:\n"							\
		"	.section	.fixup,\"ax\"\n"		\
		"4:	mov	%5,%0\n"				\
		"	jmp	3b\n"					\
		"	.previous\n"					\
		"	.section	__ex_table,\"a\"\n"		\
		"	.balign	4\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "=&r" (__pu_err)					\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),	\
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),		\
		  "i" (-EFAULT));					\
})

extern int __put_user_unknown(void);

/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)					\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:	movbu	(%0),%3;\n"			\
			"1:	movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)

#define __copy_user_zeroing(to, from, size)				\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:	movbu	(%0),%3;\n"			\
			"1:	movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:	mov	%2,%0\n"			\
			"	clr	%3\n"				\
			"4:	movbu	%3,(%1);\n"			\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	4b;\n"				\
			"	mov	%0,%2\n"			\
			"	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
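
/*
 * Illustrative note (hypothetical caller, not part of this header): the
 * zeroing variant used on the copy-from-user path pads the untouched tail of
 * the kernel buffer with zeroes when the user copy faults part-way through,
 * so a failed copy never leaves stale kernel data behind:
 *
 *	struct req r;					// hypothetical structure
 *	if (__copy_from_user(&r, ureq, sizeof(r)))	// after access_ok()
 *		return -EFAULT;				// tail of r has been zeroed
 */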

#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov %0,a0;\n"		\
		"0:	movbu (%1),d3;\n"	\
		"1:	movbu d3,(%2);\n"	\
		"	add -1,a0;\n"		\
		"	bne 0b;\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp 2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign 4\n"		\
		"	.long 0b,3b\n"		\
		"	.long 1b,3b\n"		\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov %0,a0;\n"			\
		"0:	movbu (%1),d3;\n"		\
		"1:	movbu d3,(%2);\n"		\
		"	add -1,a0;\n"			\
		"	bne 0b;\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp 2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign 4\n"			\
		"	.long 0b,3b\n"			\
		"	.long 1b,3b\n"			\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n)	\
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n)	\
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})

#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
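
/*
 * Illustrative sketch only (hypothetical ioctl-style handler, not part of
 * this header): all of the copy routines return the number of bytes that
 * could NOT be copied, so zero means complete success:
 *
 *	struct foo karg;			// hypothetical structure
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */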

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
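
/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * strncpy_from_user() returns the length copied (excluding the NUL), the
 * count if the source string did not fit, or a negative errno on a faulting
 * address:
 *
 *	char name[32];				// hypothetical buffer
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;		// source string was too long
 */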

#endif /* _ASM_UACCESS_H */