/*
 * User address space access functions.
 * The non-inlined parts of asm-cris/uaccess.h are here.
 *
 * Copyright (C) 2000, 2003 Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 3.2.1 Axis release R53/1.53-v32".

   Note that for CRISv32, the PC saved at a bus-fault is the address
   *at* the faulting instruction, with a special case for instructions
   in delay slots: then it's the address of the branch.  Note also that
   in contrast to v10, a postincrement in the instruction is *not*
   performed at a bus-fault; the register is seen having the original
   value in fault handlers.  */
/* Copy to userspace.  This is based on the memcpy used for
   kernel-to-kernel copying; see "string.c".  */

unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on.  */

  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
  /* When src is aligned but not dst, this makes a few extra needless
     cycles.  I believe it would take as many to check that the
     re-alignment was unnecessary.  */
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes; so we
	 don't have to check further for overflows.  */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_copy_to_user_1 (dst, src, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_copy_to_user_2 (dst, src, retn);
      n -= 2;
    }
  }
  /* Movem is dirt cheap.  The overhead is low enough to always use the
     minimum possible block size as the threshold.  */
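  /* A chunk is 44 bytes because one movem transfers registers r0..r10:
     eleven 4-byte registers (hence also the 11*4 bytes of stack used to
     save them around the loop).  */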
  if (n >= 44)
  {
    /* For large copies we use 'movem'.  */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.  */
    __asm__ volatile ("\
	;; Check that the register asm declaration got right.		\n\
	;; The GCC manual explicitly says TRT will happen.		\n\
	.ifnc %0%1%2%3,$r13$r11$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll use in the movem process		\n\
	;; on the stack.						\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	;; Now we've got this:						\n\
	;; r11 - src							\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop					\n\
	subq	44,$r12							\n\
0:									\n\
	movem	[$r11+],$r10						\n\
	subq	44,$r12							\n\
1:	bge	0b							\n\
	movem	$r10,[$r13+]						\n\
2:									\n\
	addq	44,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack					\n\
	movem	[$sp+],$r10						\n\
	.section .fixup,\"ax\"						\n\
									\n\
; When failing on any of the 1..44 bytes in a chunk, we adjust back the \n\
; source pointer and just drop through to the by-16 and by-4 loops to	\n\
; get the correct number of failing bytes.  This necessarily means a	\n\
; few extra exceptions, but invalid user pointers shouldn't happen in	\n\
; time-critical code anyway.						\n\
									\n\
; The faulting to-movem is in the delay slot of the branch at 1:, so	\n\
; (per the note at the top of this file) the branch address is what	\n\
; the exception table must match; $r13 still holds the chunk start.	\n\
3:	subq	44,$r11							\n\
	addq	44,$r12							\n\
	jump	2b							\n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	1b,3b							\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
  }
  while (n >= 16)
  {
    __asm_copy_to_user_16 (dst, src, retn);
    n -= 16;
  }
  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME:  Test with and without; increasing switch to be 0..15.  */
  while (n >= 4)
  {
    __asm_copy_to_user_4 (dst, src, retn);
    n -= 4;
  }
  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_copy_to_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_to_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_to_user_3 (dst, src, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__copy_user);
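
/* A sketch of the intended caller pattern (hypothetical names; the real
   wrappers belong in asm-cris/uaccess.h): callers check access_ok()
   first and treat a nonzero return value as the number of trailing
   bytes that were never written:

	unsigned long left = __copy_user(uptr, kbuf, len);
	if (left != 0)
		return -EFAULT;
*/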
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return-value is the number of bytes that were
   inaccessible.  */

unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
				  unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on.  */

  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
  /* The best reason to align src is that we then know that a read-fault
     was for aligned bytes; there's no 1..3 remaining good bytes to
     salvage.  */
  if (((unsigned long) src & 3) != 0)
  {
    if (((unsigned long) src & 1) && n != 0)
    {
      __asm_copy_from_user_1 (dst, src, retn);
      n--;
    }

    if (((unsigned long) src & 2) && n >= 2)
    {
      __asm_copy_from_user_2 (dst, src, retn);
      n -= 2;
    }

    /* We only need one check after the unalignment-adjustments, because
       if both adjustments were done, either both or neither reference
       had an exception.  */
    if (retn != 0)
      goto copy_exception_bytes;
  }
  /* Movem is dirt cheap.  The overhead is low enough to always use the
     minimum possible block size as the threshold.  */
  if (n >= 44)
  {
    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.  */
    __asm__ volatile ("\
	.ifnc %0%1%2%3,$r13$r11$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll use in the movem process		\n\
	;; on the stack.						\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	;; Now we've got this:						\n\
	;; r11 - src							\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop					\n\
	subq	44,$r12							\n\
0:									\n\
	movem	[$r11+],$r10						\n\
	subq	44,$r12							\n\
	bge	0b							\n\
	movem	$r10,[$r13+]						\n\
4:									\n\
	addq	44,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack					\n\
	movem	[$sp+],$r10						\n\
	.section .fixup,\"ax\"						\n\
									\n\
;; Do not jump back into the loop if we fail.  For some uses, we get a	\n\
;; page fault somewhere on the line.  Without checking for page limits,	\n\
;; we don't know where, but we need to copy accurately and keep an	\n\
;; accurate count; not just clear the whole line.  To do that, we fall	\n\
;; down in the code below, proceeding with smaller amounts.  It should	\n\
;; be kept in mind that we have to cater to code like what at one time	\n\
;; was in fs/super.c:							\n\
;;  i = size - copy_from_user((void *)page, data, size);		\n\
;; which would cause repeated faults while clearing the remainder of	\n\
;; the SIZE bytes at PAGE after the first fault.			\n\
;; A caveat here is that we must not fall through from a failing page	\n\
;; to a valid page.							\n\
									\n\
;; The from-movem at 0: does not update $r11 when it faults (see the	\n\
;; note at the top of this file), so src still points at the chunk	\n\
;; start and the smaller copies below retry from the right place.	\n\
3:									\n\
	jump	4b	;; Fall through, pretending the fault didn't happen. \n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	0b,3b							\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
  }
  /* Either we directly start copying here, using dword copying in a loop,
     or we copy as much as possible with 'movem' and then the last block
     (<44 bytes) is copied here.  This will work since 'movem' will have
     updated src, dst and n.  (Except with failing src.)

     Since we want to keep src accurate, we can't use
     __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and
     retn, but not src (by design; its value is ignored elsewhere).  */

  while (n >= 4)
  {
    __asm_copy_from_user_4 (dst, src, retn);
    n -= 4;

    if (retn)
      goto copy_exception_bytes;
  }
  /* If we get here, there were no memory read faults.  */
  switch (n)
  {
    /* These copies are at least "naturally aligned" (so we don't have
       to check each byte), due to the src alignment code before the
       movem loop.  The *_3 case *will* get the correct count for retn.  */
    case 0:
      /* This case deliberately left in (if you have doubts check the
	 generated assembly code).  */
      break;
    case 1:
      __asm_copy_from_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_from_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_from_user_3 (dst, src, retn);
      break;
  }
  /* If we get here, retn correctly reflects the number of failing
     bytes.  */
  return retn;

copy_exception_bytes:
  /* We already have "retn" bytes cleared, and need to clear the
     remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
     memset is preferred here, since this isn't speed-critical code and
     we'd rather have this a leaf-function than calling memset.  */
  {
    char *endp;
    for (endp = dst + n; dst < endp; dst++)
      *dst = 0;
  }

  return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
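
/* A sketch of the zeroing semantics, using the fs/super.c idiom quoted
   in the fixup comment above (names from that example):

	i = size - __copy_user_zeroing(page, data, size);

   i is the number of bytes actually copied, and the tail page[i..size)
   reads back as zeroes rather than stale kernel data.  */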
/* Zero userspace.  */

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on.  */

  register char *dst __asm__ ("r13") = pto;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes.  */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_clear_1 (dst, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_clear_2 (dst, retn);
      n -= 2;
    }
  }
  /* Decide which copying method to use.
     FIXME: This number is from the "ordinary" kernel memset.  */
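  /* Here a chunk is 12*4 = 48 bytes: the movem below stores registers
     r0..r11, twelve 4-byte registers that are all cleared beforehand.  */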
  if (n >= 48)
  {
    /* For large clears we use 'movem'.  */

    /* It is not optimal to tell the compiler about clobbering any
       call-saved registers; that will move the saving/restoring of
       those registers to the function prologue/epilogue, and make
       non-movem sizes suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right; then
       check the equalities in the first comment.  It should say
       something like "r13=r13, r11=r11, r12=r12".  */
    __asm__ volatile ("\
	.ifnc %0%1%2,$r13$r12$r10					\n\
	.err								\n\
	.endif								\n\
									\n\
	;; Save the registers we'll clobber in the movem process	\n\
	;; on the stack.  Don't mention them to gcc, it will only be	\n\
	;; upset.							\n\
	subq	11*4,$sp						\n\
	movem	$r10,[$sp]						\n\
									\n\
	clear.d	$r0							\n\
	clear.d	$r1							\n\
	clear.d	$r2							\n\
	clear.d	$r3							\n\
	clear.d	$r4							\n\
	clear.d	$r5							\n\
	clear.d	$r6							\n\
	clear.d	$r7							\n\
	clear.d	$r8							\n\
	clear.d	$r9							\n\
	clear.d	$r10							\n\
	clear.d	$r11							\n\
									\n\
	;; Now we've got this:						\n\
	;; r13 - dst							\n\
	;; r12 - n							\n\
									\n\
	;; Update n for the first loop					\n\
	subq	12*4,$r12						\n\
0:									\n\
	subq	12*4,$r12						\n\
1:	bge	0b							\n\
	movem	$r11,[$r13+]						\n\
2:									\n\
	addq	12*4,$r12 ;; compensate for last loop underflowing n	\n\
									\n\
	;; Restore registers from stack					\n\
	movem	[$sp+],$r10						\n\
	.section .fixup,\"ax\"						\n\
									\n\
;; The to-movem sits in the delay slot of the branch at 1:, so that	\n\
;; branch address is what the exception table must match (see the note	\n\
;; at the top of this file).  $r13 still points at the chunk start;	\n\
;; compensate n and drop through to the smaller clears below to get an	\n\
;; exact count of the failing bytes.					\n\
3:	addq	12*4,$r12						\n\
	jump	2b							\n\
									\n\
	.previous							\n\
	.section __ex_table,\"a\"					\n\
	.dword	1b,3b							\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (n), "2" (retn)
     /* Clobber */ : "r11");
  }
  while (n >= 16)
  {
    __asm_clear_16 (dst, retn);
    n -= 16;
  }
  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME:  Test with and without; increasing switch to be 0..15.  */
  while (n >= 4)
  {
    __asm_clear_4 (dst, retn);
    n -= 4;
  }
  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_clear_1 (dst, retn);
      break;
    case 2:
      __asm_clear_2 (dst, retn);
      break;
    case 3:
      __asm_clear_3 (dst, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__do_clear_user);
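
/* A sketch of the caller pattern (hypothetical names): clear_user()-style
   wrappers check access_ok() first and treat a nonzero return value as
   the number of bytes left unzeroed:

	if (__do_clear_user(uptr, len) != 0)
		return -EFAULT;
*/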