/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on the glibc cortex-strings work originally authored
 * by Linaro. The original sources can be found at:
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
/*
 * Fill in the buffer with character c (alignment handled by the hardware).
 *
 * Parameters:
 *	x0 - buf
 *	x1 - c
 *	x2 - n
 * Returns:
 *	x0 - buf
 */

dstin		.req	x0
val		.req	w1
count		.req	x2
tmp1		.req	x3
tmp1w		.req	w3
tmp2		.req	x4
tmp2w		.req	w4
zva_len_x	.req	x5
zva_len		.req	w5
zva_bits_x	.req	x6

A_l		.req	x7
A_lw		.req	w7
dst		.req	x8
tmp3w		.req	w9
tmp3		.req	x9

SYM_FUNC_START(__pi_memset)
	mov	dst, dstin	/* Preserve return value. */
	and	A_lw, val, #255		/* Keep only the low byte of c. */
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32
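	/*
	 * For illustration: the AND/ORR sequence above replicates the low
	 * byte of c across all 64 bits of A_l. With c = 0xAB:
	 *   after "lsl #8"  A_lw = 0xABAB
	 *   after "lsl #16" A_lw = 0xABABABAB
	 *   after "lsl #32" A_l  = 0xABABABABABABABAB
	 * Roughly equivalent C: v = (uint64_t)(c & 0xff) * 0x0101010101010101ULL.
	 */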
	/* Small counts (n <= 15): any of these stores may be unaligned. */
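	/*
	 * Note (for illustration): counts of at most 15 bytes can be
	 * finished by testing the individual size bits of count and
	 * issuing at most one 8-, 4-, 2- and 1-byte store each
	 * (str/strh/strb), so the short case needs no loop.
	 */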
	/* Check whether the start address is 16-byte aligned. */
	neg	tmp2, dst
	ands	tmp2, tmp2, #15
	b.eq	.Laligned
/*
 * The count is at least 16, so we can use one stp to store the first
 * 16 bytes, then advance dst to the next 16-byte boundary. After that,
 * the current store address is aligned.
 */
	stp	A_l, A_l, [dst]	/* Potentially unaligned store. */
	/* Make dst 16-byte aligned. */
	sub	count, count, tmp2
	add	dst, dst, tmp2

.Laligned:
	cbz	A_l, .Lzero_mem	/* Zeroing can take the DC ZVA fast path. */
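	/*
	 * For illustration: tmp2 was computed above as (-dst) & 15, the
	 * number of bytes from dst to the next 16-byte boundary. E.g. for
	 * dst = 0x1009, tmp2 = 7: the stp covers those 7 bytes (plus 9
	 * that will be written again) and dst then lands on 0x1010.
	 */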
.Ltail_maybe_long:
	cmp	count, #64
	b.ge	.Lnot_short
.Ltail63:
	ands	tmp1, count, #0x30
	b.eq	3f
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	stp	A_l, A_l, [dst], #16
1:
	stp	A_l, A_l, [dst], #16
2:
	stp	A_l, A_l, [dst], #16
/*
 * The remaining length is less than 16; use one stp to write the last
 * 16 bytes. This may write some bytes twice and the access may be
 * unaligned.
 */
3:
	ands	count, count, #15
	cbz	count, 4f
	add	dst, dst, count
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
4:
	ret
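	/*
	 * For illustration of the trick at 3: with count = 5 bytes left,
	 * dst is advanced by 5 and the stp rewrites bytes [dst-16, dst):
	 * 11 bytes that were already set plus the final 5. Every byte gets
	 * the same value, so the double write is harmless and avoids a
	 * byte-by-byte loop.
	 */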

	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lnot_short:
	sub	dst, dst, #16	/* Pre-bias. */
	sub	count, count, #64
1:
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	b.ge	1b
	tst	count, #0x3f
	add	dst, dst, #16
	b.ne	.Ltail63
.Lexitfunc:
	ret
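	/*
	 * For illustration: in the loop above, dst is pre-biased by -16, so
	 * the four stp instructions cover offsets +16..+79 past the biased
	 * pointer, i.e. 64 contiguous bytes per iteration; the write-back
	 * form "[dst, #64]!" advances dst by 64 each pass, and the final
	 * "add dst, dst, #16" removes the bias again.
	 */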

	/*
	 * For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.
	 */
.Lzero_mem:
	cmp	count, #63
	b.le	.Ltail63
	/*
	 * For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.
	 */
	cmp	count, #128
	b.lt	.Lnot_short	/* Fall through only when count >= 128. */

	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short	/* DZP set: DC ZVA is prohibited. */
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved. */
	lsl	zva_len, tmp3w, zva_len
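	/*
	 * For illustration: DCZID_EL0.BS (bits [3:0]) encodes the DC ZVA
	 * block size as log2 of the number of 4-byte words, so the
	 * computation above yields zva_len = 4 << BS bytes. E.g. BS = 4
	 * gives the common 64-byte block.
	 */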

	ands	tmp3w, zva_len, #63
	/*
	 * Ensure zva_len is not less than 64: using ZVA is not worthwhile
	 * if the block size is below 64 bytes.
	 */
	b.ne	.Lnot_short
.Lzero_by_line:
	/*
	 * Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment.
	 */
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment. */
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x	/* Bytes to the next ZVA boundary. */
	b.eq	2f			/* Already aligned. */
	/* Not aligned; check that there's enough to write after alignment. */
	sub	tmp1, count, tmp2
	/*
	 * Guarantee that the length remaining for ZVA is at least 64 bytes,
	 * so that the processing at 2f cannot run past the end of the
	 * memory range.
	 */
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
	b.lt	.Lnot_short
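	/*
	 * For illustration: if tmp1 >= 64 held above, the ccmp performs
	 * "cmp tmp1, zva_len_x"; otherwise it sets NZCV to 0b1000 (N set),
	 * which makes the b.lt taken. Net effect: bail out to .Lnot_short
	 * unless tmp1 >= 64 and tmp1 >= zva_len_x.
	 */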
	/*
	 * We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes.
	 */
	mov	count, tmp1
1:
	stp	A_l, A_l, [dst]
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	subs	tmp2, tmp2, #64
	stp	A_l, A_l, [dst, #48]
	add	dst, dst, #64
	b.ge	1b
	/* We've overrun a bit, so adjust dst downwards (tmp2 is now negative). */
	add	dst, dst, tmp2
2:
	sub	count, count, zva_len_x
3:
	dc	zva, dst	/* Zero one whole ZVA block. */
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
	b.ge	3b
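	/*
	 * For illustration: "dc zva, dst" zeroes one whole zva_len-byte
	 * block in a single instruction, with no data read. With a 64-byte
	 * block size, zeroing 4 KiB takes 64 trips around the loop above
	 * instead of 256 stp instructions.
	 */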
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long
	ret
SYM_FUNC_END(__pi_memset)
SYM_FUNC_ALIAS(__memset, __pi_memset)
EXPORT_SYMBOL(__memset)

SYM_FUNC_ALIAS_WEAK(memset, __pi_memset)
EXPORT_SYMBOL(memset)
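
/*
 * Note: __pi_memset is the position-independent variant usable from the
 * kernel's early (PI) code. The weak memset alias lets instrumented
 * implementations (e.g. KASAN's) interpose on the standard symbol, while
 * callers that must avoid instrumentation can use __memset directly.
 */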