/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
	isb				// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
9990:
	.endm
/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro	clearbhb
	hint	#22
	.endm
/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
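/*
 * Illustrative usage sketch (not part of the original header), assuming
 * x0 holds an untrusted index, x1 the number of entries, and x2/x3 are
 * free scratch registers; 'table' and the 9f target are hypothetical.
 * The mask is applied after the architectural bounds check so that a
 * mispredicted branch cannot steer the dependent load:
 *
 *	cmp	x0, x1
 *	b.hs	9f			// architecturally out of range
 *	mask_nospec64 x0, x1, x2	// x0 forced to 0 if x0 >= x1
 *	adr_l	x3, table
 *	ldr	x0, [x3, x0, lsl #3]
 */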
/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm
#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
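/*
 * Illustrative usage sketch (not part of the original header): wrap a
 * single user-space access so that a fault resumes at a fixup label.
 * The 9998 label and register choices are hypothetical, and EFAULT is
 * assumed to come from linux/errno.h:
 *
 *	USER(9998f, ldr w2, [x1])	// fault here branches to 9998
 *	mov	x0, #0
 *	ret
 * 9998:	mov	x0, #-EFAULT		// fixup path
 *	ret
 */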
/*
 * Register aliases.
 */
lr	.req	x30		// link register
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif
/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
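/*
 * Illustrative usage sketch (not part of the original header): combine a
 * low/high 32-bit pair held in x0/x1 into x2. On a LE kernel this
 * expands to "orr x2, x0, x1, lsl #32"; on BE the macro's parameter
 * order is swapped, so the same call site remains correct:
 *
 *	regs_to_64 x2, x0, x1
 */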
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm
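/*
 * Illustrative usage sketch (not part of the original header): take the
 * address of a kernel symbol; 'vectors' serves purely as an example.
 *
 *	adr_l	x0, vectors		// x0 = address of vectors
 */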
	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm
	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm
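/*
 * Illustrative usage sketch (not part of the original header): a
 * read-modify-write of a 64-bit kernel variable. 'my_var' is a
 * hypothetical symbol; x1 is the scratch register str_l clobbers.
 *
 *	ldr_l	x0, my_var
 *	add	x0, x0, #1
 *	str_l	x0, my_var, x1
 */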
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *       non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm
	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
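/*
 * Illustrative usage sketch (not part of the original header): read this
 * CPU's copy of a per-cpu variable. 'my_percpu_var' is a hypothetical
 * symbol; x1 is a scratch register that gets clobbered.
 *
 *	ldr_this_cpu x0, my_percpu_var, x1
 */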
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm
/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm
/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
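/*
 * Illustrative usage sketch (not part of the original header): round an
 * address in x0 down to the system-safe D-cache line boundary, with
 * x2/x3 as scratch; this mirrors the sub/bic pattern dcache_by_line_op
 * uses below.
 *
 *	dcache_line_size x2, x3		// x2 = line size, x3 corrupted
 *	sub	x3, x2, #1
 *	bic	x0, x0, x3		// x0 = line-aligned address
 */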
/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
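/*
 * Illustrative usage sketch (not part of the original header): clean and
 * invalidate the region [x0, x0 + x1) to the point of coherency, the
 * same invocation the __flush_dcache_area() routine uses:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */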
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
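/*
 * Illustrative usage sketch (not part of the original header): copy the
 * page at x1 to x0, both assumed PAGE_SIZE-aligned; x2-x9 are clobbered
 * as the eight temporaries.
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */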
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
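/*
 * Illustrative usage sketch (not part of the original header):
 * materialise a 64-bit constant without a literal-pool load. This
 * particular value takes the long path, expanding to one movz plus
 * three movk instructions:
 *
 *	mov_q	x0, 0xdead0000beef1234
 */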
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
/*
 * Errata workaround prior to disable MMU. Insert an ISB immediately prior
 * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm
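/*
 * Illustrative usage sketch (not part of the original header): recover
 * the output address from a page table entry held in x1.
 *
 *	pte_to_phys x0, x1		// x0 = PA bits [47:PAGE_SHIFT]
 */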
/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 *	model:		MIDR_CPU_MODEL of CPU
 *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
 *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
 *	res:		Result register.
 *	tmp1, tmp2, tmp3: Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
 */
	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs		\res, midr_el1
	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
	and		\tmp3, \res, \tmp2	// Extract model
	and		\tmp1, \res, \tmp1	// rev & variant
	mov_q		\tmp2, \model
	cmp		\tmp3, \tmp2
	cset		\res, eq
	cbz		\res, .Ldone\@		// Model matches ?

	.if (\rv_min != 0)			// Skip min check if rv_min == 0
	mov_q		\tmp3, \rv_min
	cmp		\tmp1, \tmp3
	cset		\res, ge
	.endif					// \rv_min != 0
	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q		\tmp2, \rv_max
	cmp		\tmp1, \tmp2
	cset		\tmp2, le
	and		\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm
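/*
 * Illustrative usage sketch (not part of the original header): test for
 * Cortex-A57 between r0p0 and r1p2, with the boolean result left in x0
 * and x1-x3 used as scratch; the 8f target is hypothetical.
 *
 *	cpu_midr_match MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0), \
 *		       MIDR_CPU_VAR_REV(1, 2), x0, x1, x2, x3
 *	cbz	x0, 8f			// not an affected CPU
 */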
	.macro __mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb	spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	dsb	nsh
	isb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
	/* Save/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

#endif	/* __ASM_ASSEMBLER_H */