/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <linux/cfi_types.h>

#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>

.pushsection .noinstr.text, "ax"
/*
 * We build a jump to memcpy_orig by default which gets NOPped out on
 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
 * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
 */
/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 *  rax original destination
 */
SYM_TYPED_FUNC_START(__memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS
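
	/*
	 * Fall-through path taken when X86_FEATURE_REP_GOOD patches the
	 * default jump to NOPs. A sketch assuming the standard memcpy
	 * register contract (rdi = dest, rsi = src, rdx = count): copy
	 * the bulk as 8-byte words with REP MOVSQ, the remaining 0-7
	 * bytes with REP MOVSB, and return the original destination in
	 * rax.
	 */
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	RET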
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)

SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
SYM_FUNC_START_LOCAL(memcpy_erms)
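	/*
	 * ERMS path, same rdi/rsi/rdx contract assumed: a single
	 * byte-granular REP MOVSB handles the whole copy.
	 */
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	RET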
SYM_FUNC_END(memcpy_erms)

SYM_FUNC_START_LOCAL(memcpy_orig)
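	/*
	 * Save the return value, and hand short copies straight to the
	 * tail handler; the .Lhandle_tail label and the 0x20-byte
	 * threshold are assumed from the loop structure below.
	 */
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb   .Lhandle_tail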
	/*
	 * We check whether memory false dependence could occur,
	 * then jump to corresponding copy mode.
	 */
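	/*
	 * A sketch of that check: compare the low address bytes and copy
	 * backward when the source sits below the destination, otherwise
	 * fall into the forward 4x8-byte loop (.Lcopy_backward is an
	 * assumed local label).
	 */
	cmp  %dil, %sil
	jl   .Lcopy_backward
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx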
	/*
	 * Move in blocks of 4x8 bytes:
	 */
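	/*
	 * Load four quadwords and advance rsi, then store them and
	 * advance rdi; the r8-r11 scratch registers match the backward
	 * loop below.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi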
	jae  .Lcopy_forward_loop
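	/*
	 * rdx ran 0x20 below the true remainder; restore it and let the
	 * shared tail handler finish. The backward-copy path starts here.
	 */
	addl $0x20, %edx
	jmp  .Lhandle_tail

.Lcopy_backward: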
	/*
	 * Calculate copy position to tail.
	 */
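	/* Point rsi/rdi past the end and apply the same 0x20 bias. */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx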
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae  .Lcopy_backward_loop
	/*
	 * Calculate copy position to head.
	 */
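	/*
	 * Drop the bias, rewind rsi/rdi to the copy head, and fall into
	 * the shared tail handler (label names assumed).
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb   .Lless_16bytes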
	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
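	RET
	.p2align 4
	/* Next size class; .Lless_16bytes is an assumed local label. */
.Lless_16bytes:
	cmpl $8, %edx
	jb   .Lless_8bytes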
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
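	RET
	.p2align 4
	/* 4..7 bytes next; .Lless_8bytes is an assumed local label. */
.Lless_8bytes:
	cmpl $4, %edx
	jb   .Lless_3bytes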
	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
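	RET
	.p2align 4
	/*
	 * 1..3 bytes: subtract one so the flags distinguish "nothing to
	 * copy" (borrow) from "exactly one byte" (zero); label names are
	 * assumed.
	 */
.Lless_3bytes:
	subl $1, %edx
	jb   .Ldone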
	/*
	 * Move data from 1 byte to 3 bytes.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
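	/* Both paths store the first byte, kept in %cl, last. */
.Lstore_1byte:
	movb %cl, (%rdi)
.Ldone:
	RET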
SYM_FUNC_END(memcpy_orig)

.popsection