/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */

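/*
 * A minimal sketch of the toggle pair this comment describes: IRQ
 * masking sets/clears the DAIF I bit (immediate #2 of daifset/daifclr).
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm
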
/*
 * Enable and disable debug exceptions.
 */

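/*
 * Likewise, a sketch of the debug-exception pair: immediate #8 selects
 * the DAIF D (debug) bit.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm
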
	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1			// Clear MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1			// Set MDSCR_EL1.SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

/*
 * SMP data memory barrier
 */

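/*
 * A sketch of the macro the comment above describes: it simply wraps
 * dmb with a caller-supplied option (ish, ishld, ...).
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm
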
/*
 * Value prediction barrier
 */

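/* A sketch of the barrier itself: CSDB is encoded as HINT #20 */
	.macro	csdb
	hint	#20
	.endm
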
/*
 * Clear Branch History instruction
 */

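/* Likewise: CLRBHB sits in the hint space as HINT #22 */
	.macro	clearbhb
	hint	#22
	.endm
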
/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

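/*
 * Illustrative use (registers hypothetical), clamping a syscall number
 * in x20 against the table size in x19 before it is used as an index:
 *
 *	mask_nospec64 x20, x19, x2	// x20 is zeroed if x20 >= x19
 */
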
/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

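/*
 * Illustrative use (labels hypothetical) for a user-space access:
 *
 *	USER(9f, ldr x0, [x1])		// a fault at 9999b branches to 9f
 */
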
lr	.req	x30		// link register

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

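/*
 * For example (registers illustrative), on a little-endian kernel:
 *
 *	regs_to_64 x0, x2, x3		// x0 = (x3 << 32) | x2
 */
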
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 *
 * @dst: destination register (64 bit wide)
 * @sym: name of the symbol
 */

	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm

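/*
 * For example (symbol illustrative):
 *
 *	adr_l	x0, vectors		// x0 = runtime address of vectors
 */
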
/*
 * @dst: destination register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional 64-bit scratch register to be used if <dst> is a
 *       32-bit wide register, in which case it cannot be used to hold
 *       the address
 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm

/*
 * @src: source register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: mandatory 64-bit scratch register to calculate the address
 *       while <src> needs to be preserved.
 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm

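/*
 * For example (symbol and registers illustrative):
 *
 *	ldr_l	x1, some_flag		// x1 = some_flag
 *	str_l	x1, some_flag, x2	// some_flag = x1, x2 corrupted
 */
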
/*
 * @dst: Result of per_cpu(sym, smp_processor_id())
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
 */
	.macro adr_this_cpu, dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

/*
 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

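/*
 * Worked example of the decoding above: CTR_EL0.DminLine (bits [19:16])
 * holds log2 of the line size in words, so a field value of 4 gives
 * 4 << 4 = 64-byte D-cache lines.
 */
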
/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

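/*
 * Illustrative call (registers hypothetical), cleaning and invalidating
 * x1 bytes at x0 to the point of coherency:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */
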
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

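/*
 * Illustrative use (register choice hypothetical), copying PAGE_SIZE
 * bytes from x1 to x0, 64 bytes per iteration:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
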
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

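/*
 * For example (function name illustrative), exporting a routine to the
 * early boot path under a __pi_ alias:
 *
 *	ENTRY(memcpy)
 *		...
 *	ENDPIPROC(memcpy)
 */
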
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */

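/*
 * A minimal sketch of such a pseudo-op, assuming the linker script
 * emits _lo32/_hi32 counterparts for each symbol referenced this way:
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
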
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

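/*
 * For example, "mov_q x9, 0x1234" assembles to a two-instruction
 * movz/movk pair, while a constant with bits set above bit 47 needs
 * the full movz plus three movk sequence.
 */
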
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm

/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 *	model:		MIDR_CPU_MODEL of CPU
 *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
 *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
 *	res:		Result register.
 *	tmp1, tmp2, tmp3: Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
 */

	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs		\res, midr_el1
	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
	and		\tmp3, \res, \tmp2	// Extract model
	and		\tmp1, \res, \tmp1	// rev & variant
	mov_q		\tmp2, \model
	cmp		\tmp3, \tmp2
	cset		\res, eq
	cbz		\res, .Ldone\@		// Model matches ?

	.if (\rv_min != 0)			// Skip min check if rv_min == 0
	mov_q		\tmp3, \rv_min
	cmp		\tmp1, \tmp3
	cset		\res, ge
	.endif					// \rv_min != 0

	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q		\tmp2, \rv_max
	cmp		\tmp1, \tmp2
	cset		\tmp2, le
	and		\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm

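/*
 * Illustrative (hypothetical) check for Cortex-A57 r0p0 to r1p2:
 *
 *	cpu_midr_match MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0), \
 *		       MIDR_CPU_VAR_REV(1, 2), x0, x1, x2, x3
 *	cbnz	x0, 1f			// non-zero: model and revision match
 */
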
	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Save/restore x0-x3 to/from the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

#endif	/* __ASM_ASSEMBLER_H */