/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *	    Hans-Peter Nilsson (hp@axis.com)
 */
7 #ifndef _CRIS_ARCH_UACCESS_H
8 #define _CRIS_ARCH_UACCESS_H
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
/*
 * Store value `x` to user address `addr` with a move of kind `op`
 * (presumably the move-instruction size suffix -- confirm against the
 * callers).  On a faulting store the fixup code presumably loads the
 * "g" (-EFAULT) operand into `err` (tied read-write via "0" (err)).
 * NOTE(review): the store instruction itself, the output-operand line,
 * the .fixup body and the __ex_table entry are missing from this
 * excerpt of the file -- restore them from the original before building.
 */
#define __put_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
/*
 * Store a 64-bit value: low word (%M1) to [addr], high word (%H1) to
 * [addr+4].  Label "2:" sits *after* the first store because a fault
 * reports the PC following the faulting insn (see the note at the top
 * of this file), so 2b identifies a fault in the first move.
 * NOTE(review): the .fixup body, the __ex_table entries and the
 * output-operand line are missing from this excerpt -- restore them
 * from the original before building.
 */
#define __put_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		"	move.d %M1,[%2]\n" \
		"2:	move.d %H1,[%2+4]\n" \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
/* See comment before __put_user_asm. */

/*
 * Load one value of kind `op` from user address `addr` into `x`; on a
 * faulting load the fixup code presumably sets `err` to the -EFAULT
 * operand (err is tied read-write via "0" (err)).
 * NOTE(review): the load instruction, the .fixup body and the
 * __ex_table entry are missing from this excerpt -- restore them from
 * the original before building.
 */
#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))
/*
 * Load a 64-bit value: low word from [addr] into %M1, high word from
 * [addr+4] into %H1.  Label "2:" follows the first load because a
 * faulting insn reports the PC *after* itself (see top-of-file note).
 * NOTE(review): the .fixup body and the __ex_table entries are missing
 * from this excerpt -- restore them from the original before building.
 */
#define __get_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		"	move.d [%2],%M1\n" \
		"2:	move.d [%2+4],%H1\n" \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))
/*
 * Copy a null terminated string from userspace.
 *
 * Returns:
 *	-EFAULT for an exception
 *	count if we hit the buffer limit
 *	bytes copied if we hit a null byte
 *	(without the null byte)
 */
/*
 * Copy a NUL-terminated string from user `src` into kernel `dst`,
 * bounded by `count`; result is computed into `res` (see the operand
 * list below).
 * NOTE(review): this excerpt is heavily truncated -- the function head
 * (return type / storage class), local declarations, most of the asm
 * body (labels, branches, the .fixup and __ex_table payloads) and the
 * end of the function are missing.  Restore from the original file;
 * do not attempt to rebuild the asm from this fragment.
 */
__do_strncpy_from_user(char *dst, const char *src, long count)
	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy
	 * loop.  This code is deduced from:
	 *
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	...
	 *	res = count - tmp1;
	 */
	__asm__ __volatile__ (
		"	move.b [%2+],$r9\n"
		"	move.b $r9,[%1+]\n"
		"	move.b [%2+],$r9\n"
		"	.section .fixup,\"ax\"\n"

	/* There's one address for a fault at the first move, and
	   two possible PC values for a fault at the second move,
	   being a delay-slot filler. However, the branch-target
	   for the second move is the same as the first address.
	   Just so you don't get confused... */
		"	.section __ex_table,\"a\"\n"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
151 /* A few copy asms to build up the more complex ones from.
153 Note again, a post-increment is performed regardless of whether a bus
154 fault occurred in that instruction, and PC for a faulted insn is the
155 address *after* the insn. */
/*
 * Frame asm shared by the copy_to/from_user size-specific macros.
 * Callers inject the copy instructions (COPY), the fault-fixup code
 * (FIXUP) and the exception-table entries (TENTRY).  Operands:
 * %0 = to, %1 = from, %2 = ret -- all read-write via the matching
 * "0"/"1"/"2" input constraints, so post-incremented pointers and the
 * adjusted return count propagate back to the C variables.
 * NOTE(review): the COPY/FIXUP/TENTRY placement lines, the .previous
 * directives and the closing of the asm statement are missing from
 * this excerpt (the last visible line still ends in a continuation
 * backslash).  Restore them from the original before building.
 */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
/*
 * Copy exactly 1 byte from user space.  Label "2:" sits on the
 * kernel-side store: a fault in the user-side load reports the PC
 * *after* the faulting insn, i.e. 2b (see top-of-file note).
 * NOTE(review): the FIXUP and TENTRY argument lines are missing from
 * this excerpt -- restore them from the original.
 */
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"2:	move.b $r9,[%0+]\n", \
/*
 * Copy one 16-bit word from user space, chaining any further copy
 * insns, fixups and exception-table entries (the EXTRA_* arguments).
 * The "2:" label is on the kernel-side store: a fault in the user-side
 * load reports the PC *after* the faulting insn (top-of-file note),
 * which is exactly 2b.  The fixup at 3: adds 2 to %2 (the ret
 * operand) -- presumably the not-copied tally.
 */
#define __asm_copy_from_user_2x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_user_cont(dst, src, err, \
		"	move.w [%1+],$r9\n" \
		"2:	move.w $r9,[%0+]\n" EXTRA, \
		"3:	addq 2,%2\n" EXTRA_FIXUP, \
		"	.dword 2b,3b\n" EXTRA_TENTRY)

/* Copy exactly 2 bytes from user space. */
#define __asm_copy_from_user_2(dst, src, err) \
	__asm_copy_from_user_2x_cont(dst, src, err, "", "", "")
/* Copy 3 bytes from user space: the 2-byte copier plus one trailing
   byte (label 4: on the store, per the fault-PC convention).
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"4:	move.b $r9,[%0+]\n", \
/*
 * Copy one 32-bit word from user space, chaining further steps via
 * the EXTRA_* arguments.  Label "2:" marks the kernel-side store (the
 * fault PC of the user-side load); the fixup at 3: adds 4 to %2 (ret).
 */
#define __asm_copy_from_user_4x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_user_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"2:	move.d $r9,[%0+]\n" EXTRA, \
		"3:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 2b,3b\n" EXTRA_TENTRY)

/* Copy exactly 4 bytes from user space. */
#define __asm_copy_from_user_4(dst, src, err) \
	__asm_copy_from_user_4x_cont(dst, src, err, "", "", "")
/* Copy 5 bytes from user space: the 4-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_5(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"4:	move.b $r9,[%0+]\n", \
/* Add a 16-bit user-space copy on top of the 4-byte copier; the
   COPY/FIXUP/TENTRY arguments chain further steps.
   NOTE(review): the FIXUP argument line (label 5, referenced by the
   ".dword 4b,5b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"4:	move.w $r9,[%0+]\n" COPY, \
		"	.dword 4b,5b\n" TENTRY)
/* Copy exactly 6 bytes from user space. */
#define __asm_copy_from_user_6(dst, src, err) \
	__asm_copy_from_user_6x_cont(dst, src, err, "", "", "")
/* Copy 7 bytes from user space: the 6-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_7(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"6:	move.b $r9,[%0+]\n", \
/* Add a second 32-bit user-space copy on top of the 4-byte copier.
   NOTE(review): the FIXUP argument line (label 5, referenced by the
   ".dword 4b,5b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"4:	move.d $r9,[%0+]\n" COPY, \
		"	.dword 4b,5b\n" TENTRY)
/* Copy exactly 8 bytes from user space. */
#define __asm_copy_from_user_8(dst, src, err) \
	__asm_copy_from_user_8x_cont(dst, src, err, "", "", "")
/* Copy 9 bytes from user space: the 8-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_9(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"6:	move.b $r9,[%0+]\n", \
/* Add a 16-bit user-space copy on top of the 8-byte copier.
   NOTE(review): the FIXUP argument line (label 7, referenced by the
   ".dword 6b,7b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"6:	move.w $r9,[%0+]\n" COPY, \
		"	.dword 6b,7b\n" TENTRY)
/* Copy exactly 10 bytes from user space. */
#define __asm_copy_from_user_10(dst, src, err) \
	__asm_copy_from_user_10x_cont(dst, src, err, "", "", "")
/* Copy 11 bytes from user space: the 10-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_11(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"8:	move.b $r9,[%0+]\n", \
/* Add a 32-bit user-space copy on top of the 8-byte copier.
   NOTE(review): the FIXUP argument line (label 7, referenced by the
   ".dword 6b,7b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"6:	move.d $r9,[%0+]\n" COPY, \
		"	.dword 6b,7b\n" TENTRY)
/* Copy exactly 12 bytes from user space. */
#define __asm_copy_from_user_12(dst, src, err) \
	__asm_copy_from_user_12x_cont(dst, src, err, "", "", "")
/* Copy 13 bytes from user space: the 12-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_13(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"8:	move.b $r9,[%0+]\n", \
/* Add a 16-bit user-space copy on top of the 12-byte copier.
   NOTE(review): the FIXUP argument line (label 9, referenced by the
   ".dword 8b,9b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"8:	move.w $r9,[%0+]\n" COPY, \
		"	.dword 8b,9b\n" TENTRY)
/* Copy exactly 14 bytes from user space. */
#define __asm_copy_from_user_14(dst, src, err) \
	__asm_copy_from_user_14x_cont(dst, src, err, "", "", "")
/* Copy 15 bytes from user space: the 14-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_from_user_15(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"10:	move.b $r9,[%0+]\n", \
/* Add a 32-bit user-space copy on top of the 12-byte copier.
   NOTE(review): the FIXUP argument line (label 9, referenced by the
   ".dword 8b,9b" entry below) is missing from this excerpt -- restore
   it from the original. */
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"8:	move.d $r9,[%0+]\n" COPY, \
		"	.dword 8b,9b\n" TENTRY)
/* Copy exactly 16 bytes from user space. */
#define __asm_copy_from_user_16(dst, src, err) \
	__asm_copy_from_user_16x_cont(dst, src, err, "", "", "")
/* Add a 32-bit user-space copy on top of the 16-byte copier.
   NOTE(review): the FIXUP argument line (label 11, referenced by the
   ".dword 10b,11b" entry below) is missing from this excerpt --
   restore it from the original. */
#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_16x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"10:	move.d $r9,[%0+]\n" COPY, \
		"	.dword 10b,11b\n" TENTRY)
/* Copy exactly 20 bytes from user space. */
#define __asm_copy_from_user_20(dst, src, err) \
	__asm_copy_from_user_20x_cont(dst, src, err, "", "", "")
/* Add a 32-bit user-space copy on top of the 20-byte copier.
   NOTE(review): the FIXUP argument line (label 13, referenced by the
   ".dword 12b,13b" entry below) is missing from this excerpt --
   restore it from the original. */
#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_20x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"12:	move.d $r9,[%0+]\n" COPY, \
		"	.dword 12b,13b\n" TENTRY)
/* Copy exactly 24 bytes from user space. */
#define __asm_copy_from_user_24(dst, src, err) \
	__asm_copy_from_user_24x_cont(dst, src, err, "", "", "")
336 /* And now, the to-user ones. */
/*
 * Copy exactly 1 byte to user space.  Here the fault can only be in
 * the user-side store, so label "2:" follows the store (fault PC is
 * the address *after* the faulting insn -- see top-of-file note).
 * NOTE(review): the FIXUP and TENTRY argument lines are missing from
 * this excerpt -- restore them from the original.
 */
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n2:\n", \
/*
 * Copy one 16-bit word to user space, chaining further steps via the
 * EXTRA_* arguments.  Label "2:" follows the user-side store, so a
 * fault in that store reports a PC equal to 2b (the address after the
 * insn); the fixup at 3: adds 2 to %2 (the ret operand).
 */
#define __asm_copy_to_user_2x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_user_cont(dst, src, err, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n2:\n" EXTRA, \
		"3:	addq 2,%2\n" EXTRA_FIXUP, \
		"	.dword 2b,3b\n" EXTRA_TENTRY)

/* Copy exactly 2 bytes to user space. */
#define __asm_copy_to_user_2(dst, src, err) \
	__asm_copy_to_user_2x_cont(dst, src, err, "", "", "")
/* Copy 3 bytes to user space: the 2-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n4:\n", \
/*
 * Copy one 32-bit word to user space; label "2:" follows the user-side
 * store (fault PC convention), and the fixup at 3: adds 4 to %2 (ret).
 */
#define __asm_copy_to_user_4x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_user_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n2:\n" EXTRA, \
		"3:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 2b,3b\n" EXTRA_TENTRY)

/* Copy exactly 4 bytes to user space. */
#define __asm_copy_to_user_4(dst, src, err) \
	__asm_copy_to_user_4x_cont(dst, src, err, "", "", "")
/* Copy 5 bytes to user space: the 4-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n4:\n", \
/* Add a 16-bit user-space store on top of the 4-byte to-user copier;
   label 4: follows the store, fixup 5: adds 2 to %2 (ret). */
#define __asm_copy_to_user_6x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_4x_cont(dst, src, err, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n4:\n" EXTRA, \
		"5:	addq 2,%2\n" EXTRA_FIXUP, \
		"	.dword 4b,5b\n" EXTRA_TENTRY)

/* Copy exactly 6 bytes to user space. */
#define __asm_copy_to_user_6(dst, src, err) \
	__asm_copy_to_user_6x_cont(dst, src, err, "", "", "")
/* Copy 7 bytes to user space: the 6-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n6:\n", \
/* Add a second 32-bit user-space store on top of the 4-byte to-user
   copier; label 4: follows the store, fixup 5: adds 4 to %2 (ret). */
#define __asm_copy_to_user_8x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_4x_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n4:\n" EXTRA, \
		"5:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 4b,5b\n" EXTRA_TENTRY)

/* Copy exactly 8 bytes to user space. */
#define __asm_copy_to_user_8(dst, src, err) \
	__asm_copy_to_user_8x_cont(dst, src, err, "", "", "")
/* Copy 9 bytes to user space: the 8-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n6:\n", \
/* Add a 16-bit user-space store on top of the 8-byte to-user copier;
   label 6: follows the store, fixup 7: adds 2 to %2 (ret). */
#define __asm_copy_to_user_10x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_8x_cont(dst, src, err, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n6:\n" EXTRA, \
		"7:	addq 2,%2\n" EXTRA_FIXUP, \
		"	.dword 6b,7b\n" EXTRA_TENTRY)

/* Copy exactly 10 bytes to user space. */
#define __asm_copy_to_user_10(dst, src, err) \
	__asm_copy_to_user_10x_cont(dst, src, err, "", "", "")
/* Copy 11 bytes to user space: the 10-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n8:\n", \
/* Add a 32-bit user-space store on top of the 8-byte to-user copier;
   label 6: follows the store, fixup 7: adds 4 to %2 (ret). */
#define __asm_copy_to_user_12x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_8x_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n6:\n" EXTRA, \
		"7:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 6b,7b\n" EXTRA_TENTRY)

/* Copy exactly 12 bytes to user space. */
#define __asm_copy_to_user_12(dst, src, err) \
	__asm_copy_to_user_12x_cont(dst, src, err, "", "", "")
/* Copy 13 bytes to user space: the 12-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n8:\n", \
/* Add a 16-bit user-space store on top of the 12-byte to-user copier;
   label 8: follows the store, fixup 9: adds 2 to %2 (ret). */
#define __asm_copy_to_user_14x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_12x_cont(dst, src, err, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n8:\n" EXTRA, \
		"9:	addq 2,%2\n" EXTRA_FIXUP, \
		"	.dword 8b,9b\n" EXTRA_TENTRY)

/* Copy exactly 14 bytes to user space. */
#define __asm_copy_to_user_14(dst, src, err) \
	__asm_copy_to_user_14x_cont(dst, src, err, "", "", "")
/* Copy 15 bytes to user space: the 14-byte copier plus one byte.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n10:\n", \
/* Add a 32-bit user-space store on top of the 12-byte to-user copier;
   label 8: follows the store, fixup 9: adds 4 to %2 (ret). */
#define __asm_copy_to_user_16x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_12x_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n8:\n" EXTRA, \
		"9:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 8b,9b\n" EXTRA_TENTRY)

/* Copy exactly 16 bytes to user space. */
#define __asm_copy_to_user_16(dst, src, err) \
	__asm_copy_to_user_16x_cont(dst, src, err, "", "", "")
/* Add a 32-bit user-space store on top of the 16-byte to-user copier;
   label 10: follows the store, fixup 11: adds 4 to %2 (ret). */
#define __asm_copy_to_user_20x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_16x_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n10:\n" EXTRA, \
		"11:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 10b,11b\n" EXTRA_TENTRY)

/* Copy exactly 20 bytes to user space. */
#define __asm_copy_to_user_20(dst, src, err) \
	__asm_copy_to_user_20x_cont(dst, src, err, "", "", "")
/* Add a 32-bit user-space store on top of the 20-byte to-user copier;
   label 12: follows the store, fixup 13: adds 4 to %2 (ret). */
#define __asm_copy_to_user_24x_cont(dst, src, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_copy_to_user_20x_cont(dst, src, err, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n12:\n" EXTRA, \
		"13:	addq 4,%2\n" EXTRA_FIXUP, \
		"	.dword 12b,13b\n" EXTRA_TENTRY)

/* Copy exactly 24 bytes to user space. */
#define __asm_copy_to_user_24(dst, src, err) \
	__asm_copy_to_user_24x_cont(dst, src, err, "", "", "")
494 /* Define a few clearing asms with exception handlers. */
/* This frame-asm is like the __asm_copy_user_cont one, but has one less
   input. */
/*
 * Frame asm for the clear_user family: like __asm_copy_user_cont but
 * with only two operands -- %0 = to, %1 = ret, both read-write via the
 * matching "0"/"1" input constraints (no `from` pointer is needed to
 * clear).  CLEAR/FIXUP/TENTRY inject the instructions, fixup code and
 * exception-table entries.
 * NOTE(review): the CLEAR/FIXUP/TENTRY placement lines, the .previous
 * directives and the closing of the asm statement are missing from
 * this excerpt (the last visible line still ends in a continuation
 * backslash).  Restore them from the original before building.
 */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
/* Clear exactly 1 byte in user space; label 2: follows the store
   (fault PC convention).
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		"	clear.b [%0+]\n2:\n", \
/* Clear exactly 2 bytes in user space; label 2: follows the store.
   NOTE(review): the FIXUP and TENTRY argument lines are missing from
   this excerpt -- restore them from the original. */
#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		"	clear.w [%0+]\n2:\n", \
/* Clear exactly 3 bytes in user space (word then byte).
   NOTE(review): the first CLEAR line (presumably the clear.w step
   preceding label 2) and the FIXUP/TENTRY argument lines are missing
   from this excerpt -- restore them from the original. */
#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2:	clear.b [%0+]\n3:\n", \
/*
 * Clear one 32-bit word in user space and chain further clears,
 * fixups and table entries via the EXTRA_* arguments.  Label "2:"
 * follows the store (a faulting insn reports the PC *after* itself);
 * the fixup at 3: adds 4 to %1 (the ret operand).
 */
#define __asm_clear_4x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear(dst, err, \
		"	clear.d [%0+]\n2:\n" EXTRA, \
		"3:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 2b,3b\n" EXTRA_TENTRY)

/* Clear exactly 4 bytes in user space. */
#define __asm_clear_4(dst, err) \
	__asm_clear_4x_cont(dst, err, "", "", "")
/* Clear a second 32-bit word on top of the 4-byte clearer; label 4:
   follows the store, fixup 5: adds 4 to %1 (ret). */
#define __asm_clear_8x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear_4x_cont(dst, err, \
		"	clear.d [%0+]\n4:\n" EXTRA, \
		"5:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 4b,5b\n" EXTRA_TENTRY)

/* Clear exactly 8 bytes in user space. */
#define __asm_clear_8(dst, err) \
	__asm_clear_8x_cont(dst, err, "", "", "")
/* Clear a third 32-bit word on top of the 8-byte clearer; label 6:
   follows the store, fixup 7: adds 4 to %1 (ret). */
#define __asm_clear_12x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear_8x_cont(dst, err, \
		"	clear.d [%0+]\n6:\n" EXTRA, \
		"7:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 6b,7b\n" EXTRA_TENTRY)

/* Clear exactly 12 bytes in user space. */
#define __asm_clear_12(dst, err) \
	__asm_clear_12x_cont(dst, err, "", "", "")
/* Clear a fourth 32-bit word on top of the 12-byte clearer; label 8:
   follows the store, fixup 9: adds 4 to %1 (ret). */
#define __asm_clear_16x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear_12x_cont(dst, err, \
		"	clear.d [%0+]\n8:\n" EXTRA, \
		"9:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 8b,9b\n" EXTRA_TENTRY)

/* Clear exactly 16 bytes in user space. */
#define __asm_clear_16(dst, err) \
	__asm_clear_16x_cont(dst, err, "", "", "")
/* Clear a fifth 32-bit word on top of the 16-byte clearer; label 10:
   follows the store, fixup 11: adds 4 to %1 (ret). */
#define __asm_clear_20x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear_16x_cont(dst, err, \
		"	clear.d [%0+]\n10:\n" EXTRA, \
		"11:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 10b,11b\n" EXTRA_TENTRY)

/* Clear exactly 20 bytes in user space. */
#define __asm_clear_20(dst, err) \
	__asm_clear_20x_cont(dst, err, "", "", "")
/* Clear a sixth 32-bit word on top of the 20-byte clearer; label 12:
   follows the store, fixup 13: adds 4 to %1 (ret). */
#define __asm_clear_24x_cont(dst, err, EXTRA, EXTRA_FIXUP, EXTRA_TENTRY) \
	__asm_clear_20x_cont(dst, err, \
		"	clear.d [%0+]\n12:\n" EXTRA, \
		"13:	addq 4,%1\n" EXTRA_FIXUP, \
		"	.dword 12b,13b\n" EXTRA_TENTRY)

/* Clear exactly 24 bytes in user space. */
#define __asm_clear_24(dst, err) \
	__asm_clear_24x_cont(dst, err, "", "", "")
/*
 * Return the size of a string (including the ending 0)
 *
 * Return length of string in userspace including terminating 0
 * or 0 for error.  Return a value greater than N if too long.
 */
597 strnlen_user(const char *s, long n)
601 if (!access_ok(VERIFY_READ, s, 0))
605 * This code is deduced from:
608 * while (tmp1-- > 0 && *s++)
616 __asm__ __volatile__ (
629 " .section .fixup,\"ax\"\n"
634 /* There's one address for a fault at the first move, and
635 two possible PC values for a fault at the second move,
636 being a delay-slot filler. However, the branch-target
637 for the second move is the same as the first address.
638 Just so you don't get confused... */
640 " .section __ex_table,\"a\"\n"
644 : "=r" (res), "=r" (tmp1)