/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

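/*
 * Usage sketch (illustrative, not part of the original macro set): save
 * the DAIF flags in a spare register, run a sequence with IRQs masked,
 * then restore the previous mask state:
 *
 *	save_and_disable_irq x22	// x22 := DAIF, then mask IRQs
 *	...				// critical section, IRQs masked
 *	restore_irq x22			// put the saved DAIF flags back
 */
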
/*
 * Enable and disable debug exceptions.
 */
	.macro	disable_dbg
	msr	daifset, #8
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	disable_dbg
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #1
	msr	mdscr_el1, \tmp
9990:	enable_dbg
	.endm

/*
 * Enable both debug exceptions and interrupts. This is likely to be
 * faster than two daifclr operations, since writes to this register
 * are self-synchronising.
 */
	.macro	enable_dbg_and_irq
	msr	daifclr, #(8 | 2)
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

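/*
 * Usage sketch (illustrative, not part of the original macro set): entry
 * code of this vintage uses the macro to bound the syscall number under
 * speculation before indexing the syscall table, along the lines of:
 *
 *	mask_nospec64 x0, x1, x2	// x0 := 0 if x0 >= x1 (table size)
 *
 * The sub/bic/asr sequence forces the index to zero, without a branch,
 * whenever it is out of bounds, so a mispredicted bounds check cannot
 * steer a speculative table load to attacker-controlled data.
 */
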
/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

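/*
 * Usage sketch (illustrative, not part of the original macro set): wrap a
 * user-space access so that a fault at that instruction branches to a
 * local fixup label via the exception table:
 *
 * USER(9f, ldrb w4, [x1], #1)		// a fault here branches to label 9
 */
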
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

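/*
 * Usage sketch (illustrative, not part of the original macro set):
 * combine the zero-extended 32-bit halves held in x0 (low) and x1 (high)
 * into x2:
 *
 *	regs_to_64 x2, x0, x1		// x2 = (x1 << 32) | x0 on LE
 *
 * Because the macro definition swaps its parameter names on big endian
 * builds, call sites pass the two halves in register-pair order and work
 * unchanged for either endianness.
 */
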
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm

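/*
 * Usage sketch (illustrative; 'some_flag' and 'saved_val' are made-up
 * symbol names, init_task is a real kernel symbol):
 *
 *	adr_l	x0, init_task		// x0 = &init_task
 *	ldr_l	w1, some_flag, x5	// 32-bit dst needs a 64-bit scratch
 *	str_l	x2, saved_val, x3	// x3 is used to form the address
 */
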
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *	 non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

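/*
 * Usage sketch (illustrative, not part of the original macro set):
 * kernels of this era load the current task pointer from the
 * __entry_task per-cpu variable on kernel entry, along the lines of:
 *
 *	ldr_this_cpu tsk, __entry_task, x20
 *
 * where 'tsk' is the register alias entry.S reserves for current.
 */
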
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

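/*
 * Worked example: CTR_EL0.DminLine (bits [19:16]) and IminLine (bits
 * [3:0]) hold log2 of the line size in 4-byte words. On a CPU with
 * 64-byte cache lines the field reads 4, so the macros above compute
 * 4 (bytes per word) << 4 = 64.
 */
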
/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

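/*
 * Usage sketch (illustrative, not part of the original macro set): clean
 * and invalidate the region [x0, x0 + x1) to the point of coherency, as
 * the cache maintenance routines in arch/arm64/mm/cache.S do:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */
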
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

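/*
 * Usage sketch (illustrative, not part of the original macro set): copy
 * the page at x1 to x0, clobbering x2-x9 as temporaries:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 *
 * The stores use stnp as a non-temporal hint: the freshly written
 * destination page is unlikely to be read back immediately, so there is
 * little benefit in allocating it into the cache.
 */
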
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type 	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

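/*
 * Usage sketch (illustrative): a position-independent routine such as
 * memset in arch/arm64/lib ends with
 *
 *	ENDPIPROC(memset)
 *
 * which additionally emits a __pi_memset alias that may be called before
 * the kernel virtual mapping is up.
 */
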
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

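/*
 * Worked example: a constant whose bits [63:31] are all zero (or all one)
 * fits in the signed g1 half, so only two instructions are emitted, while
 * an arbitrary 64-bit constant needs all four:
 *
 *	mov_q	x0, 0x7fffffff		// 2 insns: movz + movk
 *	mov_q	x0, 0xffff0000deadbeef	// 4 insns: movz + 3x movk
 */
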
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value
 * of 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm

/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 *	model:		MIDR_CPU_MODEL of CPU
 *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
 *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
 *	res:		Result register.
 *	tmp1, tmp2, tmp3: Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
 */
	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs		\res, midr_el1
	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
	and		\tmp3, \res, \tmp2	// Extract model
	and		\tmp1, \res, \tmp1	// rev & variant
	mov_q		\tmp2, \model
	cmp		\tmp3, \tmp2
	cset		\res, eq
	cbz		\res, .Ldone\@		// Model matches ?

	.if (\rv_min != 0)			// Skip min check if rv_min == 0
	mov_q		\tmp3, \rv_min
	cmp		\tmp1, \tmp3
	cset		\res, ge
	.endif					// \rv_min != 0
	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q		\tmp2, \rv_max
	cmp		\tmp1, \tmp2
	cset		\tmp2, le
	and		\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm

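/*
 * Usage sketch (illustrative, not from the original file): set x0 to a
 * non-zero value if this CPU is a Cortex-A57 between r0p0 and r1p2,
 * using the MIDR helpers from asm/cputype.h:
 *
 *	cpu_midr_match MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0), \
 *		       MIDR_CPU_VAR_REV(1, 2), x0, x1, x2, x3
 */
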
#endif	/* __ASM_ASSEMBLER_H */