/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 *
 * Functions to copy from and to user space.
 */
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>
#include <asm/trapnr.h>
.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	jz 102f				/* already aligned */
	_ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
	_ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_unrolled)
	jb .Lcopy_user_short_string_bytes
	jz copy_user_short_string
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	jmp copy_user_short_string
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 30b)
	_ASM_EXTABLE_CPY(2b, 30b)
	_ASM_EXTABLE_CPY(3b, 30b)
	_ASM_EXTABLE_CPY(4b, 30b)
	_ASM_EXTABLE_CPY(5b, 30b)
	_ASM_EXTABLE_CPY(6b, 30b)
	_ASM_EXTABLE_CPY(7b, 30b)
	_ASM_EXTABLE_CPY(8b, 30b)
	_ASM_EXTABLE_CPY(9b, 30b)
	_ASM_EXTABLE_CPY(10b, 30b)
	_ASM_EXTABLE_CPY(11b, 30b)
	_ASM_EXTABLE_CPY(12b, 30b)
	_ASM_EXTABLE_CPY(13b, 30b)
	_ASM_EXTABLE_CPY(14b, 30b)
	_ASM_EXTABLE_CPY(15b, 30b)
	_ASM_EXTABLE_CPY(16b, 30b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
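
/*
 * For reference, callers do not pick one of these variants by hand; the
 * C side dispatches via alternatives. A minimal sketch, modeled on
 * copy_user_generic() in arch/x86/include/asm/uaccess_64.h (treat the
 * exact clobber list below as an assumption, not a verified copy):
 *
 *	static __always_inline __must_check unsigned long
 *	copy_user_generic(void *to, const void *from, unsigned len)
 *	{
 *		unsigned ret;
 *
 *		// Patched at boot: REP_GOOD selects the string variant,
 *		// ERMS selects the enhanced fast-string variant.
 *		alternative_call_2(copy_user_generic_unrolled,
 *				   copy_user_generic_string, X86_FEATURE_REP_GOOD,
 *				   copy_user_enhanced_fast_string, X86_FEATURE_ERMS,
 *				   ASM_OUTPUT2("=a" (ret), "+D" (to), "+S" (from), "+d" (len)),
 *				   "1" (to), "2" (from), "3" (len)
 *				   : "memory", "rcx", "r8", "r9", "r10", "r11");
 *		return ret;
 *	}
 */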
/*
 * Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_generic_string)
	jb 2f				/* less than 8 bytes, go to byte copy loop */
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx			/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 11b)
	_ASM_EXTABLE_CPY(3b, 12b)
SYM_FUNC_END(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)
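
/*
 * What the string variant computes, as a C-level sketch (illustrative
 * only, not kernel code; the variable names are made up):
 *
 *	size_t qwords = len >> 3;	// copied with rep movsq
 *	size_t tail   = len & 7;	// copied with rep movsb
 *
 * If the quadword phase faults with %rcx quadwords still to go, the
 * fixup at label 11 above rebuilds the remaining byte count as
 *
 *	uncopied = tail + 8 * qwords_left;	// leal (%rdx,%rcx,8),%ecx
 *
 * before falling through to the common tail handler.
 */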
/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_FUNC_START(copy_user_enhanced_fast_string)
	/* CPUs without FSRM should avoid rep movsb for short copies */
	ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
12:	movl %ecx,%edx			/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
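
/*
 * The ALTERNATIVE above is the only difference from a plain rep movsb:
 * on CPUs with FSRM it is patched out at boot, so even tiny copies use
 * rep movsb; without FSRM, copies under 64 bytes branch to
 * copy_user_short_string. Roughly, in C terms (illustrative only;
 * rep_movsb below is a stand-in name, not a real kernel helper):
 *
 *	if (!static_cpu_has(X86_FEATURE_FSRM) && len < 64)
 *		return copy_user_short_string(dst, src, len);
 *	return rep_movsb(dst, src, len);
 */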
/*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 * Don't try to copy the tail if a machine check happened.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 * eax trap number written by ex_handler_copy()
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
	cmp $X86_TRAP_MC,%eax
	_ASM_EXTABLE_CPY(1b, 2b)

.Lcopy_user_handle_align:
	addl %ecx,%edx			/* ecx is zerorest also */
	jmp .Lcopy_user_handle_tail
SYM_CODE_END(.Lcopy_user_handle_tail)
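
/*
 * The tail handler, roughly, in C (an illustrative sketch; the function
 * and variable names below are invented for clarity):
 *
 *	unsigned long handle_tail(char *dst, const char *src,
 *				  unsigned long remaining, int trapnr)
 *	{
 *		if (trapnr == X86_TRAP_MC)
 *			return remaining;	// never re-read poisoned memory
 *		while (remaining) {
 *			*dst++ = *src++;	// each access is extable-guarded;
 *			remaining--;		// a fault exits with the count intact
 *		}
 *		return remaining;		// 0 on complete success
 *	}
 */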
/*
 * Finish memcpy of less than 64 bytes. #AC should already be set.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count (< 64)
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
SYM_CODE_START_LOCAL(copy_user_short_string)
	jz .Lcopy_user_short_string_bytes
.Lcopy_user_short_string_bytes:
40:	leal (%rdx,%rcx,8),%edx
50:	movl %ecx,%edx			/* ecx is zerorest also */
60:	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(18b, 40b)
	_ASM_EXTABLE_CPY(19b, 40b)
	_ASM_EXTABLE_CPY(21b, 50b)
	_ASM_EXTABLE_CPY(22b, 50b)
SYM_CODE_END(copy_user_short_string)
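
/*
 * Count handling in the short-string path, in C terms (illustrative only):
 *
 *	unsigned int qwords = len >> 3;	// copied by the 18:/19: movq pair
 *	unsigned int bytes  = len & 7;	// copied by the 21:/22: movb pair
 *
 * A fault in the quadword loop lands in the fixup at 40:, which rebuilds
 * the remaining byte count as bytes + 8 * qwords_left (the leal above);
 * a fault in the byte loop lands at 50:, which just forwards the byte
 * counter. Both then jump to .Lcopy_user_handle_tail.
 */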
/*
 * __copy_user_nocache - Uncached memory copy with exception handling.
 * It forces the destination out of the cache for better performance.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
 */
SYM_FUNC_START(__copy_user_nocache)
	/* If size is less than 8 bytes, go to 4-byte copy */
	jb .L_4b_nocache_copy_entry

	/* If destination is not 8-byte aligned, "cache" copy to align it */

	/* Set 4x8-byte copy count and remainder */
	jz .L_8b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
6:	movnti %r9,1*8(%rdi)
7:	movnti %r10,2*8(%rdi)
8:	movnti %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movnti %r8,4*8(%rdi)
14:	movnti %r9,5*8(%rdi)
15:	movnti %r10,6*8(%rdi)
16:	movnti %r11,7*8(%rdi)
	jnz .L_4x8b_nocache_copy_loop

	/* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
	jz .L_4b_nocache_copy_entry	/* jump if count is 0 */

	/* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
21:	movnti %r8,(%rdi)
	jnz .L_8b_nocache_copy_loop
	/* If no bytes left, we're done */
.L_4b_nocache_copy_entry:

	/* If destination is not 4-byte aligned, go to byte copy: */
	jnz .L_1b_cache_copy_entry

	/* Set 4-byte copy count (1 or 0) and remainder */
	jz .L_1b_cache_copy_entry	/* jump if count is 0 */

	/* Perform 4-byte nocache copy: */
31:	movnti %r8d,(%rdi)

	/* If no bytes left, we're done: */

	/* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
.L_1b_cache_copy_loop:
	jnz .L_1b_cache_copy_loop

	/* Finished copying; fence the prior stores */

	jmp .L_fixup_handle_tail
	lea (%rdx,%rcx,8),%rdx
	jmp .L_fixup_handle_tail
	lea (%rdx,%rcx,4),%rdx
	jmp .L_fixup_handle_tail
.L_fixup_handle_tail:
	jmp .Lcopy_user_handle_tail

	_ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(3b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(4b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(5b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(6b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(7b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(8b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(9b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(10b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(11b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(12b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(13b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(14b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(15b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(16b, .L_fixup_4x8b_copy)
	_ASM_EXTABLE_CPY(20b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(21b, .L_fixup_8b_copy)
	_ASM_EXTABLE_CPY(30b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(31b, .L_fixup_4b_copy)
	_ASM_EXTABLE_CPY(40b, .L_fixup_1b_copy)
	_ASM_EXTABLE_CPY(41b, .L_fixup_1b_copy)
SYM_FUNC_END(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)
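
/*
 * Typical C-side use of __copy_user_nocache, as a sketch based on
 * __copy_from_user_inatomic_nocache() in arch/x86/include/asm/uaccess_64.h
 * (treat the exact prototype as an assumption; it varies between trees):
 *
 *	extern long __copy_user_nocache(void *dst, const void __user *src,
 *					unsigned size, int zerorest);
 *
 *	static inline int
 *	__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
 *					  unsigned size)
 *	{
 *		kasan_check_write(dst, size);
 *		return __copy_user_nocache(dst, src, size, 0);
 *	}
 *
 * Note the alignment caveat from the header comment above: misaligned
 * destinations or sub-8-byte sizes fall back to cached stores.
 */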