2 * Based on arch/arm/include/asm/assembler.h
4 * Copyright (C) 1996-2000 Russell King
5 * Copyright (C) 2012 ARM Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #error "Only include this from assembly code"
23 #ifndef __ASM_ASSEMBLER_H
24 #define __ASM_ASSEMBLER_H
26 #include <asm/cputype.h>
27 #include <asm/ptrace.h>
28 #include <asm/thread_info.h>
31 * Stack pushing/popping (register pairs only). Equivalent to store decrement
32 * before, load increment after.
/*
 * push - push a pair of 64-bit registers onto the stack.
 * Stores \xreg1/\xreg2 with a pre-decrement of sp by 16, keeping sp
 * 16-byte aligned as AArch64 requires for sp-based accesses.
 * NOTE(review): the closing .endm is not visible in this chunk.
 */
34 .macro push, xreg1, xreg2
35 stp \xreg1, \xreg2, [sp, #-16]! // sp -= 16; [sp] = xreg1, [sp+8] = xreg2
/*
 * pop - pop a pair of 64-bit registers off the stack.
 * Exact inverse of the push macro above: load pair, then post-increment
 * sp by 16.
 * NOTE(review): the closing .endm is not visible in this chunk.
 */
38 .macro pop, xreg1, xreg2
39 ldp \xreg1, \xreg2, [sp], #16 // xreg1 = [sp], xreg2 = [sp+8]; sp += 16
43 * Enable and disable interrupts.
54 * Enable and disable debug exceptions.
/*
 * disable_step_tsk - disable single-step for the current task if it is
 * being single-stepped.
 * \flgs: register holding the task's thread_info flags
 * \tmp:  scratch register (presumably used by the MDSCR_EL1 update
 *        that is not visible in this chunk — TODO confirm)
 * NOTE(review): the lines between the tbz and the isb (the MDSCR_EL1
 * read-modify-write and the 9990: local label) plus the .endm are not
 * visible in this chunk.
 */
64 .macro disable_step_tsk, flgs, tmp
65 tbz \flgs, #TIF_SINGLESTEP, 9990f // skip forward if task not single-stepping
69 isb // Synchronise with enable_dbg
/*
 * enable_step_tsk - re-enable single-step for the current task if it
 * has TIF_SINGLESTEP set; counterpart of disable_step_tsk above.
 * \flgs: register holding the task's thread_info flags
 * \tmp:  scratch register (used by the body that is not visible here)
 * NOTE(review): the macro body after the tbz (and the .endm) is not
 * visible in this chunk.
 */
73 .macro enable_step_tsk, flgs, tmp
74 tbz \flgs, #TIF_SINGLESTEP, 9990f // skip forward if task not single-stepping
83 * Enable both debug exceptions and interrupts. This is likely to be
84 * faster than two daifclr operations, since writes to this register
85 * are self-synchronising.
87 .macro enable_dbg_and_irq
92 * SMP data memory barrier
/*
 * USER(l, x...) - emit user-space-accessing instruction(s) x together
 * with an exception-table entry so that faults at label l can be fixed
 * up. The entry is placed in the __ex_table section ("a" = allocatable).
 * NOTE(review): the intermediate continuation lines of this #define
 * (the label definition and the table entry itself) are not visible in
 * this chunk; no comments are inserted between the backslash
 * continuations to avoid breaking them.
 */
98 #define USER(l, x...) \
100 .section __ex_table,"a"; \
108 lr .req x30 // link register
119 * Select code when configured for BE.
/*
 * CPU_BE(code...): expands to its argument only on big-endian builds.
 * NOTE(review): the #else/#endif lines separating the two CPU_BE
 * definitions are not visible in this chunk.
 */
121 #ifdef CONFIG_CPU_BIG_ENDIAN
122 #define CPU_BE(code...) code
124 #define CPU_BE(code...)
128 * Select code when configured for LE.
/*
 * CPU_LE(code...): the mirror image — expands to its argument only on
 * little-endian builds.
 * NOTE(review): #else/#endif lines likewise not visible here.
 */
130 #ifdef CONFIG_CPU_BIG_ENDIAN
131 #define CPU_LE(code...)
133 #define CPU_LE(code...) code
137 * Define a macro that constructs a 64-bit value by concatenating two
138 * 32-bit registers. Note that on big endian systems the order of the
139 * registers is swapped.
/*
 * regs_to_64 - build a 64-bit value \rd from two 32-bit registers.
 * As the header comment above states, the parameter order is swapped on
 * big-endian so callers always pass (most-significant, least-significant)
 * in memory order.
 * NOTE(review): the #else/#endif around the two .macro headers and the
 * .endm are not visible in this chunk.
 */
141 #ifndef CONFIG_CPU_BIG_ENDIAN
142 .macro regs_to_64, rd, lbits, hbits
144 .macro regs_to_64, rd, hbits, lbits
146 orr \rd, \lbits, \hbits, lsl #32 // rd = lbits | (hbits << 32)
150 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
151 * <symbol> is within the range +/- 4 GB of the PC when running
152 * in core kernel context. In module context, a movz/movk sequence
153 * is used, since modules may be loaded far away from the kernel
154 * when KASLR is in effect.
157 * @dst: destination register (64 bit wide)
158 * @sym: name of the symbol
/*
 * adr_l - load the address of \sym into \dst (see header comment above:
 * PC-relative within +/- 4 GB in core kernel; absolute movz/movk
 * sequence in modules, for KASLR).
 * NOTE(review): the adrp instruction pairing with the :lo12: add, and
 * the #ifdef MODULE split between the two sequences, are not visible in
 * this chunk; neither is the .endm.
 */
160 .macro adr_l, dst, sym
163 add \dst, \dst, :lo12:\sym // add low 12 bits (completes an adrp pair)
165 movz \dst, #:abs_g3:\sym // module path: bits [63:48], zeroing rest
166 movk \dst, #:abs_g2_nc:\sym // insert bits [47:32]
167 movk \dst, #:abs_g1_nc:\sym // insert bits [31:16]
168 movk \dst, #:abs_g0_nc:\sym // insert bits [15:0]
173 * @dst: destination register (32 or 64 bit wide)
174 * @sym: name of the symbol
175 * @tmp: optional 64-bit scratch register to be used if <dst> is a
176 * 32-bit wide register, in which case it cannot be used to hold
/*
 * ldr_l - load the value of \sym into \dst (parameters documented in
 * the header comment above; \tmp defaults to empty and is only needed
 * when \dst is a 32-bit register).
 * NOTE(review): the .ifb/.else branching on \tmp, the adrp instructions
 * forming the page address, and the .endm are not visible in this chunk.
 */
179 .macro ldr_l, dst, sym, tmp=
183 ldr \dst, [\dst, :lo12:\sym] // no-tmp path: dst held the page address
186 ldr \dst, [\tmp, :lo12:\sym] // tmp path: tmp held the page address
200 * @src: source register (32 or 64 bit wide)
201 * @sym: name of the symbol
202 * @tmp: mandatory 64-bit scratch register to calculate the address
203 * while <src> needs to be preserved.
/*
 * str_l - store \src to the location of \sym; \tmp is mandatory (per the
 * header comment above) so \src itself is never clobbered while forming
 * the address.
 * NOTE(review): the adrp loading the page address into \tmp, and the
 * .endm, are not visible in this chunk.
 */
205 .macro str_l, src, sym, tmp
208 str \src, [\tmp, :lo12:\sym] // store at page-address(tmp) + low 12 bits
216 * Annotate a function as position independent, i.e., safe to be called before
217 * the kernel virtual mapping is activated.
/*
 * ENDPIPROC(x) - end a position-independent function x, additionally
 * emitting a __pi_##x alias (typed as a function, sized to the end of
 * x) so the routine can be called before the kernel virtual mapping is
 * up (see header comment above).
 * NOTE(review): the intermediate continuation lines of this #define are
 * not visible in this chunk; no comments are inserted between the
 * backslash continuations to avoid breaking them.
 */
219 #define ENDPIPROC(x) \
221 .type __pi_##x, %function; \
223 .size __pi_##x, . - x; \
227 * mov_q - move an immediate constant into a 64-bit register using
228 * between 2 and 4 movz/movk instructions (depending on the
229 * magnitude and sign of the operand)
/*
 * mov_q - materialise the 64-bit constant \val in \reg using 2-4
 * movz/movk instructions, picking the shortest sequence by testing how
 * far the (sign-extended) value reaches.
 * NOTE(review): the .else/.endif lines pairing with the two .if tests,
 * and the .endm, are not visible in this chunk.
 */
231 .macro mov_q, reg, val
232 .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff) // fits in signed 32 bits?
233 movz \reg, :abs_g1_s:\val // start from bits [31:16], sign-extending
235 .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff) // fits in signed 48 bits?
236 movz \reg, :abs_g2_s:\val // start from bits [47:32], sign-extending
238 movz \reg, :abs_g3:\val // full 64-bit case: bits [63:48] first
239 movk \reg, :abs_g2_nc:\val // insert bits [47:32]
241 movk \reg, :abs_g1_nc:\val // insert bits [31:16]
243 movk \reg, :abs_g0_nc:\val // insert bits [15:0]
247 * Check the MIDR_EL1 of the current CPU for a given model and a range of
248 * variant/revision. See asm/cputype.h for the macros used below.
250 * model: MIDR_CPU_PART of CPU
251 * rv_min: Minimum of MIDR_CPU_VAR_REV()
252 * rv_max: Maximum of MIDR_CPU_VAR_REV()
253 * res: Result register.
254 * tmp1, tmp2, tmp3: Temporary registers
256 * Corrupts: res, tmp1, tmp2, tmp3
257 * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
/*
 * cpu_midr_match - compare the current CPU's MIDR_EL1 against \model
 * and the variant/revision range [\rv_min, \rv_max]; see the header
 * comment above for the parameter and result contract (\res nonzero on
 * match).
 * NOTE(review): many interior lines are not visible in this chunk — the
 * mrs reading MIDR_EL1 into \res, the comparison/branch instructions
 * inside each .if, the .Ldone\@ label, and the .endm. Comments below
 * annotate only what is visible.
 */
259 .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
261 mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK) // mask for variant+revision field
262 mov_q \tmp2, MIDR_CPU_PART_MASK // mask for the model (part) field
263 and \tmp3, \res, \tmp2 // Extract model
264 and \tmp1, \res, \tmp1 // rev & variant
268 cbz \res, .Ldone\@ // Model matches ?
270 .if (\rv_min != 0) // Skip min check if rv_min == 0
274 .endif // \rv_min != 0
275 /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
276 .if ((\rv_min != \rv_max) || \rv_min == 0)
280 and \res, \res, \tmp2 // final result masked to a well-defined value
285 #endif /* __ASM_ASSEMBLER_H */