/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <asm-generic/access_ok.h>
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
  __get_user_check((x), (ptr), sizeof(*(ptr)))
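
/*
 * Illustrative usage only, not part of this header: a hypothetical
 * caller transfers one scalar each way and fails with -EFAULT on a
 * faulting access ("uptr" and "val" are made-up names):
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */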

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

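/*
 * Illustrative usage only: a sketch of batching several accesses to one
 * user buffer behind a single access_ok() check, as the comment above
 * describes ("uptr", "a" and "b" are made-up names):
 *
 *	if (!access_ok(uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__get_user(a, uptr) || __get_user(b, uptr + 1))
 *		return -EFAULT;
 */
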
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)                         \
        ".section __ex_table,\"a\"\n"                   \
        "       .long "#label"-.\n"                     \
        "       lda "#res","#cont"-"#label"("#err")\n"  \
        ".previous\n"

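/*
 * Descriptive note on EXC, under the encoding described above: the
 * ".long label-." word records the faulting instruction's address
 * PC-relatively, and the "lda res, cont-label(err)" longword reuses
 * the lda instruction format so its ra/rb/displacement fields carry
 * the value register, the error register, and the continuation offset
 * for the fixup code to decode.
 */

/* Never defined; referencing it turns an unsupported size into a link error. */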
extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err = 0;                                      \
        unsigned long __gu_val;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (size) {                                         \
          case 1: __get_user_8(ptr); break;                     \
          case 2: __get_user_16(ptr); break;                    \
          case 4: __get_user_32(ptr); break;                    \
          case 8: __get_user_64(ptr); break;                    \
          default: __get_user_unknown(); break;                 \
        }                                                       \
        (x) = (__force __typeof__(*(ptr))) __gu_val;            \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                          \
({                                                              \
        long __gu_err = -EFAULT;                                \
        unsigned long __gu_val = 0;                             \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);     \
        if (__access_ok(__gu_addr, size)) {                     \
                __gu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __get_user_8(__gu_addr); break;       \
                  case 2: __get_user_16(__gu_addr); break;      \
                  case 4: __get_user_32(__gu_addr); break;      \
                  case 8: __get_user_64(__gu_addr); break;      \
                  default: __get_user_unknown(); break;         \
                }                                               \
        }                                                       \
        (x) = (__force __typeof__(*(ptr))) __gu_val;            \
        __gu_err;                                               \
})

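/*
 * Descriptive note: casting the user pointer to a pointer to this
 * "large struct" for the "m" asm constraints below tells gcc that the
 * asm may access a wide region of memory rather than a single scalar,
 * so it cannot optimize away or reorder accesses behind the asm's back.
 */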
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)                             \
        __asm__("1: ldq %0,%2\n"                        \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)                             \
        __asm__("1: ldl %0,%2\n"                        \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)                             \
        __asm__("1: ldwu %0,%2\n"                       \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)                              \
        __asm__("1: ldbu %0,%2\n"                       \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

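/*
 * Descriptive note: ldq_u fetches the aligned quadword containing the
 * given address (its low three bits are ignored), extwl/extwh (extbl
 * for a byte) extract the low/high pieces of the value relative to the
 * unaligned address, and "or" merges the two halves.
 */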
#define __get_user_16(addr)                                             \
{                                                                       \
        long __gu_tmp;                                                  \
        __asm__("1: ldq_u %0,0(%3)\n"                                   \
        "2:     ldq_u %1,1(%3)\n"                                       \
        "       extwl %0,%3,%0\n"                                       \
        "       extwh %1,%3,%1\n"                                       \
        "       or %0,%1,%0\n"                                          \
        "3:\n"                                                          \
        EXC(1b,3b,%0,%2)                                                \
        EXC(2b,3b,%0,%2)                                                \
                : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)      \
                : "r"(addr), "2"(__gu_err));                            \
}

#define __get_user_8(addr)                                              \
        __asm__("1: ldq_u %0,0(%2)\n"                                   \
        "       extbl %0,%2,%0\n"                                       \
        "2:\n"                                                          \
        EXC(1b,2b,%0,%1)                                                \
                : "=&r"(__gu_val), "=r"(__gu_err)                       \
                : "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err = 0;                                      \
        __chk_user_ptr(ptr);                                    \
        switch (size) {                                         \
          case 1: __put_user_8(x, ptr); break;                  \
          case 2: __put_user_16(x, ptr); break;                 \
          case 4: __put_user_32(x, ptr); break;                 \
          case 8: __put_user_64(x, ptr); break;                 \
          default: __put_user_unknown(); break;                 \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        long __pu_err = -EFAULT;                                \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (__access_ok(__pu_addr, size)) {                     \
                __pu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __put_user_8(x, __pu_addr); break;    \
                  case 2: __put_user_16(x, __pu_addr); break;   \
                  case 4: __put_user_32(x, __pu_addr); break;   \
                  case 8: __put_user_64(x, __pu_addr); break;   \
                  default: __put_user_unknown(); break;         \
                }                                               \
        }                                                       \
        __pu_err;                                               \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)                                  \
__asm__ __volatile__("1: stq %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)                                  \
__asm__ __volatile__("1: stl %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)                                  \
__asm__ __volatile__("1: stw %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)                                   \
__asm__ __volatile__("1: stb %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

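/*
 * Descriptive note: these are read-modify-write sequences.  ldq_u
 * fetches the aligned quadword(s) containing the target, inswl/inswh
 * (insbl for a byte) position the new value, mskwl/mskwh (mskbl) clear
 * the target bytes in the old data, "or" merges old and new, and
 * stq_u writes the result back.
 */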
#define __put_user_16(x, addr)                                  \
{                                                               \
        long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;        \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %2,1(%5)\n"                               \
        "2:     ldq_u %1,0(%5)\n"                               \
        "       inswh %6,%5,%4\n"                               \
        "       inswl %6,%5,%3\n"                               \
        "       mskwh %2,%5,%2\n"                               \
        "       mskwl %1,%5,%1\n"                               \
        "       or %2,%4,%2\n"                                  \
        "       or %1,%3,%1\n"                                  \
        "3:     stq_u %2,1(%5)\n"                               \
        "4:     stq_u %1,0(%5)\n"                               \
        "5:\n"                                                  \
        EXC(1b,5b,$31,%0)                                       \
        EXC(2b,5b,$31,%0)                                       \
        EXC(3b,5b,$31,%0)                                       \
        EXC(4b,5b,$31,%0)                                       \
                : "=r"(__pu_err), "=&r"(__pu_tmp1),             \
                  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),           \
                  "=&r"(__pu_tmp4)                              \
                : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)                                   \
{                                                               \
        long __pu_tmp1, __pu_tmp2;                              \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %1,0(%4)\n"                               \
        "       insbl %3,%4,%2\n"                               \
        "       mskbl %1,%4,%1\n"                               \
        "       or %1,%2,%1\n"                                  \
        "2:     stq_u %1,0(%4)\n"                               \
        "3:\n"                                                  \
        EXC(1b,3b,$31,%0)                                       \
        EXC(2b,3b,$31,%0)                                       \
                : "=r"(__pu_err),                               \
                  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)            \
                : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

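/*
 * Descriptive note: as with raw_copy_{from,to}_user generally, these
 * return the number of bytes left uncopied (0 on success), not a
 * negative errno.
 */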
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
        return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
        return __copy_user((__force void *)to, from, len);
}

extern long __clear_user(void __user *to, long len);

static inline long
clear_user(void __user *to, long len)
{
        if (__access_ok(to, len))
                len = __clear_user(to, len);
        return len;
}

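/*
 * Illustrative usage only ("ubuf" and "size" are made-up names): zero
 * a user buffer, treating any bytes left unwritten as a fault.
 *
 *	if (clear_user(ubuf, size))
 *		return -EFAULT;
 */
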
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */