/* GNU Linux-libre 4.9.318-gnu1: arch/metag/lib/usercopy.c */
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>                  /* def of L1_CACHE_BYTES */

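/*
 * USE_RAPF enables the read-ahead prefetch (RAPF) based copy loops
 * below. RAPF_MIN_BUF_SIZE is the cut-off, three cache lines, below
 * which the prefetch loop's setup and register save/restore overhead
 * is presumably not worth it and the plain copy paths are used instead.
 */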
#define USE_RAPF
#define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)


/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */

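/*
 * An illustrative sketch of that pattern (it is what the byte-copy
 * macro below emits): the first store issues the write, and the
 * following memory op forces the pipe, so any fault is reported at the
 * numbered instruction, the one covered by an __ex_table entry:
 *
 *      SETB    [%0],D1Ar1      first write; fault not yet reported
 * 2:   SETB    [%0++],D1Ar1    forces the pipe; duplicate is combined
 */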
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        asm volatile (                                           \
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
                "       .previous\n"                             \
                "       .section __ex_table,\"a\"\n"             \
                TENTRY                                           \
                "       .previous\n"                             \
                : "=r" (to), "=r" (from), "=r" (ret)             \
                : "0" (to), "1" (from), "2" (ret)                \
                : "D1Ar1", "memory")


#define __asm_copy_to_user_1(to, from, ret)     \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

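/*
 * The *x_cont macros chain together, so e.g. __asm_copy_to_user_3
 * above expands to a single asm statement doing a word copy then a
 * byte copy. Note how the fixup entries fall through: a fault in the
 * word copy enters at 3: (ret += 2) and continues into 5: (ret += 1),
 * charging all three bytes, while a fault in the trailing byte copy
 * enters at 5: and charges only the final byte.
 */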
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "4:     SETW [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "6:     SETW [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "8:     SETW [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "10:    SETB [%0++],D1Ar1\n",           \
                "11:    ADD  %2,%2,#1\n",               \
                "       .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
        __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
        asm volatile (                                  \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %2,%2,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*
 *      optimized copying loop using RAPF when 64 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. if an error occurs we rewind
 *                      so that the original non-optimized code will fill
 *                      in this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETL at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL causing
 *              the value of LSM_STEP to be incorrect which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              i.e.: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              transfer as it thinks that the first one has already
 *              been done.
 */
#define __asm_copy_user_64bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "23:\n"                                                 \
                "SUB    %3, %3, #32\n"                                  \
                "24:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "25:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "26:\n"                                                 \
                "SUB    %3, %3, #32\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "27:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "29:\n"                                                 \
                "SUB    %3, %3, #32\n"                                  \
                "30:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "31:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "32:\n"                                                 \
                "SUB    %0, %0, #8\n"                                   \
                "33:\n"                                                 \
                "SETL   [%0++], D0.7, D1.7\n"                           \
                "SUB    %3, %3, #32\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #8\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,3b\n"                                 \
                "       .long 30b,3b\n"                                 \
                "       .long 31b,3b\n"                                 \
                "       .long 32b,3b\n"                                 \
                "       .long 33b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "cc", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind 'to' by only 8
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETL,
 *              which is: [4 - (LSM_STEP-1)]*8
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *                      LSM_STEP will be 0. i.e.: we do 4 writes in our case;
 *                      if a fault happens at the 4th write, LSM_STEP will be
 *                      0 instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*8
 */
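/*
 * Worked example (for illustration): a fault at the second of the four
 * SETL writes gives LSM_STEP == 2, so the fixup rewinds 'from' by
 * [4 - (2 - 1)] * 8 = 24 bytes, rewinds 'to' by 8 bytes, and reduces n
 * by the (2 - 1) * 8 = 8 bytes that were stored successfully.
 */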
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id) \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #3\n"                             \
                "LSL    D1Ar1, D1Ar1, #3\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #8\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

/*
 *      optimized copying loop using RAPF when 32 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. if an error occurs we rewind
 *                      so that the original non-optimized code will fill
 *                      in this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETD at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              since we're using M{S,G}ETD, a fault might happen at
 *              any address in the middle of M{S,G}ETD causing
 *              the value of LSM_STEP to be incorrect which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              i.e.: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              transfer as it thinks that the first one has already
 *              been done.
 */
#define __asm_copy_user_32bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "23:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "24:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "25:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "26:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "27:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "29:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "30:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "31:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "32:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "33:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "34:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "35:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "36:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "37:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "38:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "39:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "40:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "41:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "42:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "43:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "44:\n"                                                 \
                "SUB    %0, %0, #4\n"                                   \
                "45:\n"                                                 \
                "SETD   [%0++], D0.7\n"                                 \
                "SUB    %3, %3, #16\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #4\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,3b\n"                                 \
                "       .long 30b,3b\n"                                 \
                "       .long 31b,3b\n"                                 \
                "       .long 32b,3b\n"                                 \
                "       .long 33b,3b\n"                                 \
                "       .long 34b,3b\n"                                 \
                "       .long 35b,3b\n"                                 \
                "       .long 36b,3b\n"                                 \
                "       .long 37b,3b\n"                                 \
                "       .long 38b,3b\n"                                 \
                "       .long 39b,3b\n"                                 \
                "       .long 40b,3b\n"                                 \
                "       .long 41b,3b\n"                                 \
                "       .long 42b,3b\n"                                 \
                "       .long 43b,3b\n"                                 \
                "       .long 44b,3b\n"                                 \
                "       .long 45b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "cc", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind 'to' by only 4
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETD,
 *              which is: [4 - (LSM_STEP-1)]*4
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETD
 *                      LSM_STEP will be 0. i.e.: we do 4 writes in our case;
 *                      if a fault happens at the 4th write, LSM_STEP will be
 *                      0 instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*4
 */
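/*
 * Worked example (for illustration): a fault at the second of the four
 * SETD writes gives LSM_STEP == 2, so the fixup rewinds 'from' by
 * [4 - (2 - 1)] * 4 = 12 bytes, rewinds 'to' by 4 bytes, and reduces n
 * by (2 - 1) * 4 = 4 bytes.
 */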
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id) \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #2\n"                             \
                "LSL    D1Ar1, D1Ar1, #2\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #4\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

unsigned long __copy_user(void __user *pdst, const void *psrc,
                          unsigned long n)
{
        register char __user *dst asm ("A0.2") = pdst;
        register const char *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_to_user_1(dst, src, retn);
                n--;
                if (retn)
                        return retn + n;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_to_user_1(dst, src, retn);
                        n--;
                        if (retn)
                                return retn + n;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_to_user_2(dst, src, retn);
                n -= 2;
                if (retn)
                        return retn + n;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_to_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
                                return retn + n;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* copy user using 64 bit rapf copy */
                        __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cu");
                }
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                return retn + n;
                }
        }
        if (n >= RAPF_MIN_BUF_SIZE) {
                /* copy user using 32 bit rapf copy */
                __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                return retn + n;
                }
        }
#endif

        while (n >= 16) {
                __asm_copy_to_user_16(dst, src, retn);
                n -= 16;
                if (retn)
                        return retn + n;
        }

        while (n >= 4) {
                __asm_copy_to_user_4(dst, src, retn);
                n -= 4;
                if (retn)
                        return retn + n;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_copy_to_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_to_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_to_user_3(dst, src, retn);
                break;
        }

        /*
         * If we get here, retn correctly reflects the number of failing
         * bytes.
         */
        return retn;
}
EXPORT_SYMBOL(__copy_user);
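
/*
 * Hypothetical caller sketch (the real wrapper lives in uaccess.h, not
 * in this file): a copy_to_user() style wrapper would look roughly like
 *
 *      if (access_ok(VERIFY_WRITE, pdst, n))
 *              n = __copy_user(pdst, psrc, n);
 *      return n;       (the return value counts the bytes NOT copied)
 */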

#define __asm_copy_from_user_1(to, from, ret) \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
        __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)           \
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %2,%2,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=a" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source.
 *              Since we don't write to the kernel buffer until we have read
 *              first, the kernel buffer is already in the right state and
 *              needn't be corrected, but the source must be rewound to the
 *              beginning of the block, which is LSM_STEP*8 bytes.
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *                      LSM_STEP will be 0. i.e.: we do 4 reads in our case;
 *                      if a fault happens at the 4th read, LSM_STEP will be
 *                      0 instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #5\n"                             \
                "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
                "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
                "SUB    %1, %1, D0Ar2\n")
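/*
 * The shift/mask above extracts LSM_STEP (TXSTATUS bits 10:8) already
 * scaled: shifting right by 5 and masking with 0x38 yields
 * LSM_STEP * 8, the size of the partially read block, and ADDZ
 * substitutes 32 (= 4 * 8) when LSM_STEP reads back as 0, i.e. when
 * the fault hit the last transfer.
 */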

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source.
 *              Since we don't write to the kernel buffer until we have read
 *              first, the kernel buffer is already in the right state and
 *              needn't be corrected, but the source must be rewound to the
 *              beginning of the block, which is LSM_STEP*4 bytes.
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETD
 *                      LSM_STEP will be 0. i.e.: we do 4 reads in our case;
 *                      if a fault happens at the 4th read, LSM_STEP will be
 *                      0 instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #6\n"                             \
                "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
                "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
                "SUB    %1, %1, D0Ar2\n")
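/*
 * Likewise for the 32 bit variant: shifting right by 6 and masking
 * with 0x1c yields LSM_STEP * 4, with ADDZ supplying 16 (= 4 * 4) for
 * the LSM_STEP == 0 case.
 */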


/*
 * Copy from user to kernel. The return value is the number of bytes
 * that were inaccessible.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
                                 unsigned long n)
{
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_from_user_1(dst, src, retn);
                n--;
                if (retn)
                        return retn + n;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
                                return retn + n;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
                if (retn)
                        return retn + n;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
                                return retn + n;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* Copy using fast 64bit rapf */
                        __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cuz");
                }
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                return retn + n;
                }
        }

        if (n >= RAPF_MIN_BUF_SIZE) {
                /* Copy using fast 32bit rapf */
                __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
                                                n, "32cuz");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                return retn + n;
                }
        }
#endif

        while (n >= 4) {
                __asm_copy_from_user_4(dst, src, retn);
                n -= 4;

                if (retn)
                        return retn + n;
        }

        /* If we get here, there were no memory read faults.  */
        switch (n) {
                /* These copies are at least "naturally aligned" (so we don't
                   have to check each byte), due to the src alignment code.
                   The *_3 case *will* get the correct count for retn.  */
        case 0:
                /* This case deliberately left in (if you have doubts check the
                   generated assembly code).  */
                break;
        case 1:
                __asm_copy_from_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_from_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_from_user_3(dst, src, retn);
                break;
        }

        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
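
/*
 * Illustrative caller sketch (the generic kernel contract, not defined
 * in this file): copy_from_user() is expected to zero whatever tail
 * raw_copy_from_user() could not read, e.g. roughly
 *
 *      res = raw_copy_from_user(kbuf, ubuf, len);
 *      if (res)
 *              memset(kbuf + (len - res), 0, res);
 */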

#define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \
                "       MOV  D0Ar2,#0\n"                \
                "       MOV  D1Ar1,#0\n"                \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %1,%1,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (ret) \
                : "0" (to), "1" (ret)   \
                : "D1Ar1", "D0Ar2", "memory")

/* Zero userspace.  */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
        asm volatile (                                  \
                "       MOV D1Ar1,#0\n"                 \
                        CLEAR                           \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                        FIXUP                           \
                "       MOVT    D1Ar1,#HI(1b)\n"        \
                "       JUMP    D1Ar1,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                        TENTRY                          \
                "       .previous"                      \
                : "=r" (to), "=r" (ret)                 \
                : "0" (to), "1" (ret)                   \
                : "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETW [%0],D1Ar1\n"      \
                "2:     SETW [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#2\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
        __asm_clear(to, ret,                    \
                 "2:    SETW [%0++],D1Ar1\n"    \
                 "      SETB [%0],D1Ar1\n"      \
                 "3:    SETB [%0++],D1Ar1\n",   \
                 "4:    ADD  %1,%1,#2\n"        \
                 "5:    ADD  %1,%1,#1\n",       \
                 "      .long 2b,4b\n"          \
                 "      .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear(to, ret,                            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "3:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
        __asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_4x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "5:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
        __asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_8x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "7:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
        __asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_12x_cont(to, ret,                   \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "9:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
        __asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
        register char __user *dst asm ("D0Re0") = pto;
        register unsigned long n asm ("D1Re0") = pn;
        register unsigned long retn asm ("D0Ar6") = 0;

        if ((unsigned long) dst & 1) {
                __asm_clear_1(dst, retn);
                n--;
        }

        if ((unsigned long) dst & 2) {
                __asm_clear_2(dst, retn);
                n -= 2;
        }

        /* 64 bit copy loop */
        if (!((__force unsigned long) dst & 7)) {
                while (n >= 8) {
                        __asm_clear_8x64(dst, retn);
                        n -= 8;
                }
        }

        while (n >= 16) {
                __asm_clear_16(dst, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_clear_4(dst, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_clear_1(dst, retn);
                break;
        case 2:
                __asm_clear_2(dst, retn);
                break;
        case 3:
                __asm_clear_3(dst, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__do_clear_user);
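
/*
 * Hypothetical wrapper sketch: clear_user() is expected to validate
 * the range first, along the lines of
 *
 *      if (access_ok(VERIFY_WRITE, to, n))
 *              n = __do_clear_user(to, n);
 *      return n;       (bytes left unzeroed)
 */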

unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
        register unsigned char x asm ("D0Re0") = 0;
        asm volatile (
                "       GETB %0,[%2]\n"
                "1:\n"
                "       GETB %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_b);
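
/*
 * As with the double writes described at the top of the file, the GETB
 * above is issued twice: the second access forces the memory pipe, so
 * any fault is reported at the instruction carrying the __ex_table
 * entry. The word and dword variants below use the same pattern.
 */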

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
        register unsigned short x asm ("D0Re0") = 0;
        asm volatile (
                "       GETW %0,[%2]\n"
                "1:\n"
                "       GETW %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
        register unsigned int x asm ("D0Re0") = 0;
        asm volatile (
                "       GETD %0,[%2]\n"
                "1:\n"
                "       GETD %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETB [%2],%1\n"
                "1:\n"
                "       SETB [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_b);
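
/*
 * The doubled SETB/SETW/SETD/SETL stores in these __put_user_asm_*
 * helpers are again the write-combiner trick from the top of the file:
 * the second store carries the __ex_table entry, and the duplicate is
 * expected to be merged away at no cost.
 */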

long __put_user_asm_w(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETW [%2],%1\n"
                "1:\n"
                "       SETW [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETD [%2],%1\n"
                "1:\n"
                "       SETD [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETL [%2],%1,%t1\n"
                "1:\n"
                "       SETL [%2],%1,%t1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

long strnlen_user(const char __user *src, long count)
{
        long res;

        if (!access_ok(VERIFY_READ, src, 0))
                return 0;

        asm volatile (" MOV     D0Ar4, %1\n"
                      " MOV     D0Ar6, %2\n"
                      "0:\n"
                      " SUBS    D0FrT, D0Ar6, #0\n"
                      " SUB     D0Ar6, D0Ar6, #1\n"
                      " BLE     2f\n"
                      " GETB    D0FrT, [D0Ar4+#1++]\n"
                      "1:\n"
                      " TST     D0FrT, #255\n"
                      " BNE     0b\n"
                      "2:\n"
                      " SUB     %0, %2, D0Ar6\n"
                      "3:\n"
                      " .section .fixup,\"ax\"\n"
                      "4:\n"
                      " MOV     %0, #0\n"
                      " MOVT    D0FrT,#HI(3b)\n"
                      " JUMP    D0FrT,#LO(3b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 1b,4b\n"
                      " .previous\n"
                      : "=r" (res)
                      : "r" (src), "r" (count)
                      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

        return res;
}
EXPORT_SYMBOL(strnlen_user);
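
/*
 * Example of the (generic) semantics: for a user string "abc\0" and
 * count >= 4 the loop exits via the TST on the NUL byte and res is 4,
 * the string length including the terminator; a faulting access takes
 * the fixup path and returns 0.
 */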

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res;

        if (count == 0)
                return 0;

        /*
         * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
         *  So do we.
         *
         *  This code is deduced from:
         *
         *      char tmp2;
         *      long tmp1, tmp3;
         *      tmp1 = count;
         *      while ((*dst++ = (tmp2 = *src++)) != 0
         *             && --tmp1)
         *        ;
         *
         *      res = count - tmp1;
         *
         *  with tweaks.
         */

        asm volatile (" MOV  %0,%3\n"
                      "1:\n"
                      " GETB D0FrT,[%2++]\n"
                      "2:\n"
                      " CMP  D0FrT,#0\n"
                      " SETB [%1++],D0FrT\n"
                      " BEQ  3f\n"
                      " SUBS %0,%0,#1\n"
                      " BNZ  1b\n"
                      "3:\n"
                      " SUB  %0,%3,%0\n"
                      "4:\n"
                      " .section .fixup,\"ax\"\n"
                      "5:\n"
                      " MOV  %0,%7\n"
                      " MOVT    D0FrT,#HI(4b)\n"
                      " JUMP    D0FrT,#LO(4b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 2b,5b\n"
                      " .previous"
                      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
                      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
                      : "D0FrT", "memory", "cc");

        return res;
}
EXPORT_SYMBOL(__strncpy_from_user);