/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>
/*
 * We build a jump to memcpy_orig by default, which gets NOPped out on
 * the majority of x86 CPUs (those that set REP_GOOD). In addition, on
 * CPUs which have the enhanced REP MOVSB/STOSB feature (ERMS), those
 * NOPs are changed to a jmp to memcpy_erms, which does the copy with
 * REP MOVSB.
 */
/*
 * memcpy - Copy a memory block.
 * Output: rax = original destination
 */
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS
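	/*
	 * How the three alternatives above resolve (a sketch, assuming the
	 * standard rep-movsq fall-through body):
	 *
	 *   default:               jmp memcpy_orig    # unrolled copy loop
	 *   X86_FEATURE_REP_GOOD:  NOPs, falling through to roughly
	 *                              movq %rdi, %rax   # return original dst
	 *                              movq %rdx, %rcx
	 *                              shrq $3, %rcx     # whole quadwords
	 *                              andl $7, %edx     # leftover bytes
	 *                              rep movsq
	 *                              movl %edx, %ecx
	 *                              rep movsb
	 *   X86_FEATURE_ERMS:      jmp memcpy_erms    # single REP MOVSB
	 */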
EXPORT_SYMBOL(__memcpy)
/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
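/*
 * A minimal sketch of what the ERMS routine amounts to (assuming the
 * conventional rep-movsb sequence and the usual register convention
 * rdi = dst, rsi = src, rdx = count):
 *
 *	movq	%rdi, %rax	# return the original destination
 *	movq	%rdx, %rcx	# byte count for rep
 *	rep movsb		# hardware fast-string copy
 *	ret
 */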
	/*
	 * We check whether a memory false dependence could occur,
	 * then jump to the corresponding copy mode.
	 */
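	/*
	 * A sketch of that check (assuming the usual low-byte comparison
	 * used as a cheap aliasing heuristic):
	 *
	 *	cmp	%dil, %sil	# compare low bytes of dst and src
	 *	jl	.Lcopy_backward	# likely false dependence: go backward
	 *	subq	$0x20, %rdx	# else fall into the forward loop
	 */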
	/* Move in blocks of 4x8 bytes: */
	jae  .Lcopy_forward_loop
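	/*
	 * The forward loop copies 4x8 bytes per iteration at ascending
	 * addresses, advancing %rsi/%rdi by 32 each pass; the jae repeats
	 * while the length counter has not gone negative (carry from the
	 * subtraction at the top of the loop, assumed to be the usual
	 * subq $0x20, %rdx).
	 */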
	/* Calculate copy position to tail. */
	/*
	 * At most 3 ALU operations can issue in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
.Lcopy_backward_loop:
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae  .Lcopy_backward_loop
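	/*
	 * Same pattern as the forward loop, but with descending addresses:
	 * each 4x8-byte block is read and written at negative offsets from
	 * the current pointers, which the leaq instructions then step down
	 * by 32 before the next pass.
	 */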
	/* Calculate copy position to head. */
	/* Move 16 to 31 bytes of data. */
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
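	/*
	 * This tail case (and the smaller ones below) uses a pair of
	 * possibly-overlapping copies: one anchored at the start of the
	 * remainder and one anchored at its end (the -1*8/-2*8 offsets
	 * from rsi+rdx), so every length in the stated range is handled
	 * without a loop.
	 */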
	/* Move 8 to 15 bytes of data. */
	movq -1*8(%rsi, %rdx), %r9
	movq %r9, -1*8(%rdi, %rdx)
	/* Move 4 to 7 bytes of data. */
	movl -4(%rsi, %rdx), %r8d
	movl %r8d, -4(%rdi, %rdx)
	/* Move 1 to 3 bytes of data. */
	movzbq (%rsi, %rdx), %r9
	movb %r9b, (%rdi, %rdx)
/*
 * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to the destination are posted and don't generate machine checks.
 */
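/*
 * A sketch of the contract as used below (assuming the standard
 * arguments rdi = destination, rsi = source, rdx = byte count): the
 * routine returns 0 in rax on success, while the .fixup code at the
 * end of this file returns -EFAULT when a read from the source takes
 * a machine check.
 */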
ENTRY(memcpy_mcsafe_unrolled)
	/* Less than 8 bytes? Go to byte copy loop */
	/* Check for bad alignment of source */
	/* Already aligned */
	/* Copy one byte at a time until source is 8-byte aligned */
.L_copy_leading_bytes:
	jnz .L_copy_leading_bytes
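	/*
	 * The loop above copies one byte per iteration until %rsi reaches
	 * 8-byte alignment; the load at .L_copy_leading_bytes has its own
	 * exception table entry (see the _ASM_EXTABLE_FAULT list at the
	 * bottom), so a machine check there becomes a failure return.
	 */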
	/* Figure out how many whole cache lines (64 bytes each) to copy */
	jz .L_no_whole_cache_lines
	/* Loop copying whole cache lines */
.L_cache_w0: movq (%rsi), %r8
.L_cache_w1: movq 1*8(%rsi), %r9
.L_cache_w2: movq 2*8(%rsi), %r10
.L_cache_w3: movq 3*8(%rsi), %r11
.L_cache_w4: movq 4*8(%rsi), %r8
.L_cache_w5: movq 5*8(%rsi), %r9
.L_cache_w6: movq 6*8(%rsi), %r10
.L_cache_w7: movq 7*8(%rsi), %r11
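	/*
	 * The eight labeled loads above fetch one whole 64-byte cache line
	 * per iteration (two 4x8-byte halves); every load label is listed
	 * in the exception table below, so a machine check on a poisoned
	 * line lands in .L_memcpy_mcsafe_fail instead of crashing.
	 */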
	/* Are there any trailing 8-byte words? */
.L_no_whole_cache_lines:

	/* Copy trailing words */
.L_copy_trailing_words:
	jnz .L_copy_trailing_words

	/* Any trailing bytes? */
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
.L_copy_trailing_bytes:
	jnz .L_copy_trailing_bytes
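	/*
	 * As with the leading bytes, the trailing-word and trailing-byte
	 * loops above copy the remainder one unit at a time; their load
	 * labels are likewise covered by exception table entries below.
	 */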
	/* Copy successful. Return zero */
ENDPROC(memcpy_mcsafe_unrolled)
EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
	.section .fixup, "ax"
	/* Return -EFAULT for any failure */
.L_memcpy_mcsafe_fail:
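	/*
	 * A minimal sketch of the fixup body (assuming the usual pattern of
	 * loading the error code and returning to the caller):
	 *
	 *	mov	$-EFAULT, %rax
	 *	ret
	 *	.previous
	 */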
	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)