GNU Linux-libre 4.9.330-gnu1
arch/arm/include/asm/uaccess.h
/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
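/*
 * For illustration: fixup_exception() lives in arch/arm/mm/extable.c; in
 * rough terms the fault handler looks the faulting PC up in the exception
 * table and, if an entry exists, resumes at the fixup address.  A minimal
 * sketch, assuming the generic search_exception_tables() helper:
 */
#if 0
int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;

        /* find the entry whose insn field matches the faulting address */
        fixup = search_exception_tables(instruction_pointer(regs));
        if (fixup)
                regs->ARM_pc = fixup->fixup;    /* continue at the fixup code */

        return fixup != NULL;
}
#endif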

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();

        /* Set the current domain access to permit user accesses */
        set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
                   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

        return old_domain;
#else
        return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
        set_domain(flags);
#endif
}
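/*
 * Usage sketch (hypothetical, touch_user_word is an invented name): every
 * user access below is bracketed by this pair, so the domain-based
 * protection is only relaxed for the duration of the access:
 */
#if 0
static inline void touch_user_word(unsigned long __user *p)
{
        unsigned int __ua_flags = uaccess_save_and_enable();

        /* ... the user-space load or store would go here ... */

        uaccess_restore(__ua_flags);
}
#endif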

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS       0x00000000
#define get_ds()        (KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS         TASK_SIZE
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Prevent a mispredicted conditional call to set_fs from forwarding
         * the wrong address limit to access_ok under speculation.
         */
        dsb(nsh);
        isb();

        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)        ((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr);   \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })
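/*
 * For illustration, the assembler above is roughly equivalent to the
 * following C, written with 64-bit arithmetic so that the carry out of
 * bit 31 (the "33rd bit") stays visible; __range_ok_sketch is an invented
 * name and is not used anywhere:
 */
#if 0
static inline unsigned long __range_ok_sketch(unsigned long addr,
                                              unsigned long size)
{
        unsigned long long limit = current_thread_info()->addr_limit;
        unsigned long long end = (unsigned long long)addr + size;

        if (limit == 0)                 /* KERNEL_DS, i.e. 0x1,0000,0000 */
                return 0;               /* everything passes */

        return end <= limit ? 0 : 1;    /* 0 means the range is acceptable */
}
#endif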

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)                       \
        ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
                                                    size_t size)
{
        void __user *safe_ptr = (void __user *)ptr;
        unsigned long tmp;

        asm volatile(
        "       sub     %1, %3, #1\n"
        "       subs    %1, %1, %0\n"
        "       addhs   %1, %1, #1\n"
        "       subhss  %1, %1, %2\n"
        "       movlo   %0, #0\n"
        : "+r" (safe_ptr), "=&r" (tmp)
        : "r" (size), "r" (current_thread_info()->addr_limit)
        : "cc");

        csdb();
        return safe_ptr;
}
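/*
 * Usage sketch (hypothetical names, struct my_req and fetch_req are
 * invented): a caller that has already range-checked a user pointer can
 * additionally mask it, so that even a mispredicted branch cannot
 * dereference an out-of-range address:
 */
#if 0
static int fetch_req(struct my_req *dst, const struct my_req __user *uptr)
{
        const struct my_req __user *safe_uptr;

        if (!access_ok(VERIFY_READ, uptr, sizeof(*dst)))
                return -EFAULT;

        /* NULL if uptr + size is above addr_limit; safe under speculation */
        safe_uptr = uaccess_mask_range_ptr(uptr, sizeof(*dst));

        return copy_from_user(dst, safe_uptr, sizeof(*dst)) ? -EFAULT : 0;
}
#endif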

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)                          \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                __asmeq("%3", "r1")                                     \
                "bl     __get_user_" #__s                               \
                : "=&r" (__e), "=r" (__r2)                              \
                : "0" (__p), "r" (__l)                                  \
                : __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)                      \
        __get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)                      \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
                __asmeq("%3", "r1")                                     \
                "bl     __get_user_64t_" #__s                           \
                : "=&r" (__e), "=r" (__r2)                              \
                : "0" (__p), "r" (__l)                                  \
                : __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)                                          \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register __inttype(x) __r2 asm("r2");                   \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 1); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 1);   \
                        break;                                          \
                case 2:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 2); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 2);   \
                        break;                                          \
                case 4:                                                 \
                        if (sizeof((x)) >= 8)                           \
                                __get_user_x_64t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 4);   \
                        break;                                          \
                case 8:                                                 \
                        if (sizeof((x)) < 8)                            \
                                __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else                                            \
                                __get_user_x(__r2, __p, __e, __l, 8);   \
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
                uaccess_restore(__ua_flags);                            \
                x = (typeof(*(p))) __r2;                                \
                __e;                                                    \
        })

#define get_user(x, p)                                                  \
        ({                                                              \
                might_fault();                                          \
                __get_user_check(x, p);                                 \
         })
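/*
 * Usage sketch (hypothetical, read_user_flag is an invented name):
 * get_user() evaluates to 0 on success and -EFAULT on a faulting or
 * out-of-range pointer, storing the fetched value in its first argument:
 */
#if 0
static int read_user_flag(const unsigned int __user *uptr, unsigned int *out)
{
        unsigned int val;

        if (get_user(val, uptr))
                return -EFAULT;         /* bad user pointer */

        *out = val;
        return 0;
}
#endif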

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)                   \
        ({                                                              \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register typeof(__pu_val) __r2 asm("r2") = __pu_val;    \
                register const void __user *__p asm("r0") = __ptr;      \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                __asm__ __volatile__ (                                  \
                        __asmeq("%0", "r0") __asmeq("%2", "r2")         \
                        __asmeq("%3", "r1")                             \
                        "bl     __put_user_" #__s                       \
                        : "=&r" (__e)                                   \
                        : "0" (__p), "r" (__r2), "r" (__l)              \
                        : "ip", "lr", "cc");                            \
                __err = __e;                                            \
        })

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS                 KERNEL_DS

#define segment_eq(a, b)                (1)
#define __addr_ok(addr)         ((void)(addr), 1)
#define __range_ok(addr, size)  ((void)(addr), 0)
#define get_fs()                (KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)  __get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)     (__range_ok(addr, size) == 0)
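/*
 * Usage sketch (hypothetical, zero_user_buffer is an invented name):
 * access_ok() only performs the range check and never touches memory, so
 * the usual pattern is to validate a whole buffer once and then use the
 * unchecked helpers:
 */
#if 0
static int zero_user_buffer(void __user *ubuf, unsigned long len)
{
        if (!access_ok(VERIFY_WRITE, ubuf, len))
                return -EFAULT;

        /* range validated once; the unchecked helper may be used below */
        return __clear_user(ubuf, len) ? -EFAULT : 0;
}
#endif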

#define user_addr_max() \
        (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr)                                              \
({                                                                      \
        long __gu_err = 0;                                              \
        __get_user_err((x), (ptr), __gu_err);                           \
        __gu_err;                                                       \
})

#define __get_user_err(x, ptr, err)                                     \
do {                                                                    \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        unsigned int __ua_flags;                                        \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);  break;  \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err);  break;  \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err);  break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
        uaccess_restore(__ua_flags);                                    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
} while (0)

#define __get_user_asm(x, addr, err, instr)                     \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       mov     %1, #0\n"                               \
        "       b       2b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .popsection"                                    \
        : "+r" (err), "=&r" (x)                                 \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")

#define __get_user_asm_byte(x, addr, err)                       \
        __get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)                       \
        __get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)                  \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = __b1 | (__b2 << 8);                               \
})
#else
#define __get_user_asm_half(x, __gu_addr, err)                  \
({                                                              \
        unsigned long __b1, __b2;                               \
        __get_user_asm_byte(__b1, __gu_addr, err);              \
        __get_user_asm_byte(__b2, __gu_addr + 1, err);          \
        (x) = (__b1 << 8) | __b2;                               \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)                       \
        __get_user_asm(x, addr, err, ldr)
#endif


#define __put_user_switch(x, ptr, __err, __fn)                          \
        do {                                                            \
                const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);      \
                __typeof__(*(ptr)) __pu_val = (x);                      \
                unsigned int __ua_flags;                                \
                might_fault();                                          \
                __ua_flags = uaccess_save_and_enable();                 \
                switch (sizeof(*(ptr))) {                               \
                case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;      \
                case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;      \
                case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;      \
                case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;      \
                default: __err = __put_user_bad(); break;               \
                }                                                       \
                uaccess_restore(__ua_flags);                            \
        } while (0)

#define put_user(x, ptr)                                                \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_switch((x), (ptr), __pu_err, __put_user_check);      \
        __pu_err;                                                       \
})
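/*
 * Usage sketch (hypothetical, report_status is an invented name):
 * put_user() returns 0 on success and -EFAULT otherwise, so its result can
 * be returned directly:
 */
#if 0
static int report_status(unsigned int __user *uptr, unsigned int status)
{
        return put_user(status, uptr);
}
#endif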

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)                                              \
({                                                                      \
        long __pu_err = 0;                                              \
        __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);    \
        __pu_err;                                                       \
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)                  \
        do {                                                            \
                unsigned long __pu_addr = (unsigned long)__pu_ptr;      \
                __put_user_nocheck_##__size(x, __pu_addr, __err);       \
        } while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)                \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "3:     mov     %0, %3\n"                               \
        "       b       2b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 3b\n"                               \
        "       .popsection"                                    \
        : "+r" (err)                                            \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
        : "cc")

#define __put_user_asm_byte(x, __pu_addr, err)                  \
        __put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)                  \
        __put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)                  \
({                                                              \
        unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp, __pu_addr, err);            \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);   \
})
#else
#define __put_user_asm_half(x, __pu_addr, err)                  \
({                                                              \
        unsigned long __temp = (__force unsigned long)(x);      \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err);       \
        __put_user_asm_byte(__temp, __pu_addr + 1, err);        \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)                  \
        __put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define __reg_oper0     "%R2"
#define __reg_oper1     "%Q2"
#else
#define __reg_oper0     "%Q2"
#define __reg_oper1     "%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)                 \
        __asm__ __volatile__(                                   \
 ARM(   "1:     " TUSER(str) "  " __reg_oper1 ", [%1], #4\n"    ) \
 ARM(   "2:     " TUSER(str) "  " __reg_oper0 ", [%1]\n"        ) \
 THUMB( "1:     " TUSER(str) "  " __reg_oper1 ", [%1]\n"        ) \
 THUMB( "2:     " TUSER(str) "  " __reg_oper0 ", [%1, #4]\n"    ) \
        "3:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
        "4:     mov     %0, %3\n"                               \
        "       b       3b\n"                                   \
        "       .popsection\n"                                  \
        "       .pushsection __ex_table,\"a\"\n"                \
        "       .align  3\n"                                    \
        "       .long   1b, 4b\n"                               \
        "       .long   2b, 4b\n"                               \
        "       .popsection"                                    \
        : "+r" (err), "+r" (__pu_addr)                          \
        : "r" (x), "i" (-EFAULT)                                \
        : "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int __ua_flags;

        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags;
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
#else
        return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
        unsigned int __ua_flags = uaccess_save_and_enable();
        n = arm_clear_user(addr, n);
        uaccess_restore(__ua_flags);
        return n;
}

#else
#define __arch_copy_from_user(to, from, n)      \
                                        (memcpy(to, (void __force *)from, n), 0)
#define __arch_copy_to_user(to, from, n)        \
                                        (memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n)           (memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        check_object_size(to, n, false);

        if (likely(access_ok(VERIFY_READ, from, n)))
                res = __arch_copy_from_user(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
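/*
 * Usage sketch (hypothetical names, struct my_args and my_ioctl are
 * invented): copy_from_user() returns the number of bytes that could not be
 * copied (0 on success) and zero-fills the uncopied tail of the kernel
 * buffer, as above:
 */
#if 0
struct my_args {
        __u32 in;
        __u32 out;
};

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct my_args args;

        if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
                return -EFAULT;

        args.out = args.in * 2;

        if (copy_to_user((void __user *)arg, &args, sizeof(args)))
                return -EFAULT;

        return 0;
}
#endif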

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);

        return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);

        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_copy_to_user(to, from, n);
        return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */