1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Low-level CPU initialisation
4 * Based on arch/arm/kernel/head.S
6 * Copyright (C) 1994-2002 Russell King
7 * Copyright (C) 2003-2012 ARM Ltd.
8 * Authors: Catalin Marinas <catalin.marinas@arm.com>
9 * Will Deacon <will.deacon@arm.com>
12 #include <linux/linkage.h>
13 #include <linux/init.h>
14 #include <linux/irqchip/arm-gic-v3.h>
15 #include <linux/pgtable.h>
17 #include <asm/asm_pointer_auth.h>
18 #include <asm/assembler.h>
20 #include <asm/ptrace.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/cache.h>
23 #include <asm/cputype.h>
25 #include <asm/image.h>
26 #include <asm/kernel-pgtable.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/memory.h>
29 #include <asm/pgtable-hwdef.h>
33 #include <asm/sysreg.h>
34 #include <asm/thread_info.h>
37 #include "efi-header.S"
39 #define __PHYS_OFFSET KERNEL_START
41 #if (PAGE_OFFSET & 0x1fffff) != 0
42 #error PAGE_OFFSET must be at least 2MB aligned
46 * Kernel startup entry point.
47 * ---------------------------
49 * The requirements are:
50 * MMU = off, D-cache = off, I-cache = on or off,
51 * x0 = physical address to the FDT blob.
53 * This code is mostly position independent so you call this at
56 * Note that the callee-saved registers are used for storing variables
57 * that are useful before the MMU is enabled. The allocations are described
58 * in the entry routines.
63 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
67 * This add instruction has no meaningful effect except that
68 * its opcode forms the magic "MZ" signature required by UEFI.
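/*
 * Worked example (editor's note, not in the original source): the "MZ"
 * trick works because an AArch64 ADD immediate such as add x13, x18, #0x16
 * assembles to 0x91005a4d; stored little-endian, its first two bytes are
 * 0x4d ('M') and 0x5a ('Z'), i.e. the DOS/PE signature that UEFI firmware
 * expects at offset 0 of the image.
 */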
73 b primary_entry // branch to kernel start, magic
76 .quad 0 // Image load offset from start of RAM, little-endian
77 le64sym _kernel_size_le // Effective size of kernel image, little-endian
78 le64sym _kernel_flags_le // Informative flags, little-endian
82 .ascii ARM64_IMAGE_MAGIC // Magic number
84 .long pe_header - _head // Offset to the PE header.
95 * The following callee saved general purpose registers are used on the
96 * primary lowlevel boot path:
98 * Register Scope Purpose
99 * x21 primary_entry() .. start_kernel() FDT pointer passed at boot in x0
100 * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset
101 * x28 __create_page_tables() callee preserved temp register
102 * x19/x20 __primary_switch() callee preserved temp registers
103 * x24 __primary_switch() .. relocate_kernel() current RELR displacement
105 SYM_CODE_START(primary_entry)
106 bl preserve_boot_args
107 bl el2_setup // Drop to EL1, w0=cpu_boot_mode
108 adrp x23, __PHYS_OFFSET
109 and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
110 bl set_cpu_boot_mode_flag
111 bl __create_page_tables
113 * The following calls CPU setup code, see arch/arm64/mm/proc.S for details.
115 * On return, the CPU will be ready for the MMU to be turned on and
116 * the TCR will have been set.
118 bl __cpu_setup // initialise processor
120 SYM_CODE_END(primary_entry)
123 * Preserve the arguments passed by the bootloader in x0 .. x3
125 SYM_CODE_START_LOCAL(preserve_boot_args)
126 mov x21, x0 // x21=FDT
128 adr_l x0, boot_args // record the contents of
129 stp x21, x1, [x0] // x0 .. x3 at kernel entry
130 stp x2, x3, [x0, #16]
132 dmb sy // needed before dc ivac with MMU off
135 mov x1, #0x20 // 4 x 8 bytes
136 b __inval_dcache_area // tail call
137 SYM_CODE_END(preserve_boot_args)
140 * Macro to create a table entry to the next page.
142 * tbl: page table address
143 * virt: virtual address
144 * shift: #imm page table shift
145 * ptrs: #imm pointers per table page
148 * Corrupts: ptrs, tmp1, tmp2
149 * Returns: tbl -> next level table page address
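/*
 * Illustrative invocation (editor's sketch, not from the original source):
 *
 *	mov	x4, #PTRS_PER_PGD
 *	create_table_entry x0, x3, PGDIR_SHIFT, x4, x5, x6
 *
 * would install a PGD-level table descriptor for the virtual address in x3
 * into the table at x0, pointing at the page that follows it, and leave x0
 * pointing at that next-level table page.
 */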
151 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
152 add \tmp1, \tbl, #PAGE_SIZE
153 phys_to_pte \tmp2, \tmp1
154 orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
155 lsr \tmp1, \virt, #\shift
157 and \tmp1, \tmp1, \ptrs // table index
158 str \tmp2, [\tbl, \tmp1, lsl #3]
159 add \tbl, \tbl, #PAGE_SIZE // next level table page
163 * Macro to populate page table entries. These entries can be pointers to the next level
164 * or last level entries pointing to physical memory.
166 * tbl: page table address
167 * rtbl: pointer to page table or physical memory
168 * index: start index to write
169 * eindex: end index to write - [index, eindex] written to
170 * flags: flags for pagetable entry to or in
171 * inc: increment to rtbl between each entry
172 * tmp1: temporary variable
174 * Preserves: tbl, eindex, flags, inc
175 * Corrupts: index, tmp1
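/*
 * Worked example (editor's note, not in the original source): with
 * index = 2, eindex = 4, inc = PAGE_SIZE and rtbl holding a physical
 * address PA, the loop writes three descriptors into slots 2, 3 and 4 of
 * the table at tbl, pointing at PA, PA + PAGE_SIZE and PA + 2 * PAGE_SIZE
 * respectively, each OR-ed with 'flags'.
 */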
178 .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
179 .Lpe\@: phys_to_pte \tmp1, \rtbl
180 orr \tmp1, \tmp1, \flags // tmp1 = table entry
181 str \tmp1, [\tbl, \index, lsl #3]
182 add \rtbl, \rtbl, \inc // rtbl = pa next level
183 add \index, \index, #1
189 * Compute indices of table entries from virtual address range. If multiple entries
190 * were needed in the previous page table level then the next page table level is assumed
191 * to be composed of multiple pages. (This effectively scales the end index).
193 * vstart: virtual address of start of range
194 * vend: virtual address of end of range - we map [vstart, vend]
195 * shift: shift used to transform virtual address into index
196 * ptrs: number of entries in page table
197 * istart: index in table corresponding to vstart
198 * iend: index in table corresponding to vend
199 * count: On entry: how many extra entries were required in previous level, scales our end index.
201 * On exit: returns how many extra entries required for next page table level
203 * Preserves: vstart, vend, shift, ptrs
204 * Returns: istart, iend, count
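/*
 * Worked example (editor's note, not in the original source): with 4K
 * pages, shift = 21 and ptrs = 512, mapping the 2 MiB-aligned range
 * vstart = 3 * 2 MiB .. vend = 6 * 2 MiB with count = 0 on entry gives
 * istart = 3, iend = 6 and count = 3 on exit: four entries in this table,
 * and three extra entries that the next level uses to scale its indices.
 */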
206 .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
207 lsr \iend, \vend, \shift
209 sub \istart, \istart, #1
210 and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
212 mul \istart, \istart, \count
213 add \iend, \iend, \istart // iend += (count - 1) * ptrs
214 // our entries span multiple tables
216 lsr \istart, \vstart, \shift
218 sub \count, \count, #1
219 and \istart, \istart, \count
221 sub \count, \iend, \istart
225 * Map memory for specified virtual address range. Each level of page table needed supports
226 * multiple entries. If a level requires n entries the next page table level is assumed to be
227 * formed from n pages.
229 * tbl: location of page table
230 * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
231 * vstart: virtual address of start of range
232 * vend: virtual address of end of range - we map [vstart, vend - 1]
233 * flags: flags to use to map last level entries
234 * phys: physical address corresponding to vstart - physical memory is contiguous
235 * pgds: the number of pgd entries
237 * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
238 * Preserves: vstart, flags
239 * Corrupts: tbl, rtbl, vend, istart, iend, tmp, count, sv
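/*
 * Editor's sketch (not from the original source): if the range needs,
 * say, two PGD entries, the next level is assumed to occupy two
 * consecutive pages starting at rtbl, and so on down the levels, with
 * rtbl advancing by PAGE_SIZE for every table descriptor written. The
 * final level is filled with block entries that carry 'flags' and step
 * through 'phys' in SWAPPER_BLOCK_SIZE increments.
 */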
241 .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
243 add \rtbl, \tbl, #PAGE_SIZE
246 compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
247 populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
251 #if SWAPPER_PGTABLE_LEVELS > 3
252 compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
253 populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
258 #if SWAPPER_PGTABLE_LEVELS > 2
259 compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
260 populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
264 compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
265 bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
266 populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
270 * Setup the initial page tables. We only setup the barest amount which is
271 * required to get the kernel running. The following sections are required:
272 * - identity mapping to enable the MMU (low address, TTBR0)
273 * - first few MB of the kernel linear mapping to jump to once the MMU has been enabled
276 SYM_FUNC_START_LOCAL(__create_page_tables)
280 * Invalidate the init page tables to avoid potential dirty cache lines
281 * being evicted. Other page tables are allocated in rodata as part of
282 * the kernel image, and thus are clean to the PoC per the boot protocol.
288 bl __inval_dcache_area
291 * Clear the init page tables.
296 1: stp xzr, xzr, [x0], #16
297 stp xzr, xzr, [x0], #16
298 stp xzr, xzr, [x0], #16
299 stp xzr, xzr, [x0], #16
303 mov x7, SWAPPER_MM_MMUFLAGS
306 * Create the identity mapping.
308 adrp x0, idmap_pg_dir
309 adrp x3, __idmap_text_start // __pa(__idmap_text_start)
311 #ifdef CONFIG_ARM64_VA_BITS_52
312 mrs_s x6, SYS_ID_AA64MMFR2_EL1
313 and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
319 adr_l x6, vabits_actual
322 dc ivac, x6 // Invalidate potentially stale cache line
325 * VA_BITS may be too small to allow for an ID mapping to be created
326 * that covers system RAM if that is located sufficiently high in the
327 * physical address space. So for the ID map, use an extended virtual
328 * range in that case, and configure an additional translation level
331 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
332 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
333 * this number conveniently equals the number of leading zeroes in
334 * the physical address of __idmap_text_end.
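/*
 * Worked example (editor's note, not in the original source): if
 * __idmap_text_end were at physical address 0x0000_0080_1000_0000, its
 * highest set bit is bit 39, so clz yields 24 leading zeroes. A T0SZ of
 * 24 corresponds to a 40-bit ID-map VA range, and this value is compared
 * against TCR_T0SZ(VA_BITS_MIN) below to decide whether the VA range
 * needs to be extended.
 */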
336 adrp x5, __idmap_text_end
338 cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
339 b.ge 1f // .. then skip VA range extension
344 dc ivac, x6 // Invalidate potentially stale cache line
347 #define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
348 #define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
351 * If VA_BITS < 48, we have to configure an additional table level.
352 * First, we have to verify our assumption that the current value of
353 * VA_BITS was chosen such that all translation levels are fully
354 * utilised, and that lowering T0SZ will always result in an additional
355 * translation level to be configured.
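/*
 * Worked numbers (editor's note, not in the original source): with 4K
 * pages and VA_BITS == 39, PGDIR_SHIFT == 30, so EXTRA_SHIFT ==
 * 30 + 12 - 3 == 39 == VA_BITS and the build-time check below is
 * satisfied. With PHYS_MASK_SHIFT == 48, EXTRA_PTRS == 1 << (48 - 39)
 * == 512, i.e. the added top level is a single full page of pointers.
 */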
357 #if VA_BITS != EXTRA_SHIFT
358 #error "Mismatch between VA_BITS and page size/number of translation levels"
362 create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
365 * If VA_BITS == 48, we don't have to configure an additional
366 * translation level, but the top-level table has more entries.
368 mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
369 str_l x4, idmap_ptrs_per_pgd, x5
372 ldr_l x4, idmap_ptrs_per_pgd
373 mov x5, x3 // __pa(__idmap_text_start)
374 adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
376 map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
379 * Map the kernel image (starting with PHYS_OFFSET).
382 mov_q x5, KIMAGE_VADDR // compile time __va(_text)
383 add x5, x5, x23 // add KASLR displacement
385 adrp x6, _end // runtime __pa(_end)
386 adrp x3, _text // runtime __pa(_text)
387 sub x6, x6, x3 // _end - _text
388 add x6, x6, x5 // runtime __va(_end)
390 map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
393 * Since the page tables have been populated with non-cacheable
394 * accesses (MMU disabled), invalidate those tables again to
395 * remove any speculatively loaded cache lines.
399 adrp x0, idmap_pg_dir
400 adrp x1, idmap_pg_end
402 bl __inval_dcache_area
407 bl __inval_dcache_area
410 SYM_FUNC_END(__create_page_tables)
413 * The following fragment of code is executed with the MMU enabled.
417 SYM_FUNC_START_LOCAL(__primary_switched)
418 adrp x4, init_thread_union
419 add sp, x4, #THREAD_SIZE
421 msr sp_el0, x5 // Save thread_info
423 #ifdef CONFIG_ARM64_PTR_AUTH
424 __ptrauth_keys_init_cpu x5, x6, x7, x8
427 adr_l x8, vectors // load VBAR_EL1 with virtual
428 msr vbar_el1, x8 // vector table address
431 stp xzr, x30, [sp, #-16]!
434 #ifdef CONFIG_SHADOW_CALL_STACK
435 adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
438 str_l x21, __fdt_pointer, x5 // Save FDT pointer
440 ldr_l x4, kimage_vaddr // Save the offset between
441 sub x4, x4, x0 // the kernel virtual and
442 str_l x4, kimage_voffset, x5 // physical mappings
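/*
 * Editor's note (not in the original source): kimage_voffset is the
 * constant difference between the kernel image's virtual and physical
 * addresses, so any physical address p inside the image maps to the
 * image virtual address p + kimage_voffset.
 */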
445 adr_l x0, __bss_start
450 dsb ishst // Make zero page visible to PTW
455 #ifdef CONFIG_RANDOMIZE_BASE
456 tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
458 mov x0, x21 // pass FDT address in x0
459 bl kaslr_early_init // parse FDT for KASLR options
460 cbz x0, 0f // KASLR disabled? just proceed
461 orr x23, x23, x0 // record KASLR offset
462 ldp x29, x30, [sp], #16 // we must enable KASLR, return
463 ret // to __primary_switch()
470 SYM_FUNC_END(__primary_switched)
472 .pushsection ".rodata", "a"
473 SYM_DATA_START(kimage_vaddr)
475 SYM_DATA_END(kimage_vaddr)
476 EXPORT_SYMBOL(kimage_vaddr)
480 * end early head section, begin head code that is also used for
481 * hotplug and needs to have the same protections as the text region
483 .section ".idmap.text","awx"
486 * If we're fortunate enough to boot at EL2, ensure that the world is
487 * sane before dropping to EL1.
489 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
490 * booted in EL1 or EL2 respectively.
492 SYM_FUNC_START(el2_setup)
493 msr SPsel, #1 // We want to use SP_EL{1,2}
495 cmp x0, #CurrentEL_EL2
497 mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
499 mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
503 1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
506 #ifdef CONFIG_ARM64_VHE
508 * Check for VHE being present. For the rest of the EL2 setup,
509 * x2 being non-zero indicates that we do have VHE, and that the
510 * kernel is intended to run at EL2.
512 mrs x2, id_aa64mmfr1_el1
513 ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
518 /* Hyp configuration. */
519 mov_q x0, HCR_HOST_NVHE_FLAGS
521 mov_q x0, HCR_HOST_VHE_FLAGS
527 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
528 * This is not necessary for VHE, since the host kernel runs in EL2,
529 * and EL0 accesses are configured in the later stage of boot process.
530 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
531 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
532 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
533 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
538 orr x0, x0, #3 // Enable EL1 physical timers
541 msr cntvoff_el2, xzr // Clear virtual offset
543 #ifdef CONFIG_ARM_GIC_V3
544 /* GICv3 system register access */
545 mrs x0, id_aa64pfr0_el1
546 ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
549 mrs_s x0, SYS_ICC_SRE_EL2
550 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
551 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
552 msr_s SYS_ICC_SRE_EL2, x0
553 isb // Make sure SRE is now set
554 mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
555 tbz x0, #0, 3f // and check that it sticks
556 msr_s SYS_ICH_HCR_EL2, xzr // Reset ICH_HCR_EL2 to defaults
561 /* Populate ID registers. */
568 msr hstr_el2, xzr // Disable CP15 traps to EL2
572 mrs x1, id_aa64dfr0_el1
573 sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
575 b.lt 4f // Skip if no PMU present
576 mrs x0, pmcr_el0 // Disable debug access traps
577 ubfx x0, x0, #11, #5 // to EL2 and allow access to
579 csel x3, xzr, x0, lt // all PMU counters from EL1
581 /* Statistical profiling */
582 ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
583 cbz x0, 7f // Skip if SPE not present
585 mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
586 and x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
587 cbnz x4, 5f // then permit sampling of physical
588 mov x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
589 1 << SYS_PMSCR_EL2_PA_SHIFT)
590 msr_s SYS_PMSCR_EL2, x4 // addresses and physical counter
592 mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
593 orr x3, x3, x1 // If we don't have VHE, then
594 b 7f // use EL1&0 translation.
595 6: // For VHE, use EL2 translation
596 orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
598 msr mdcr_el2, x3 // Configure debug traps
601 mrs x1, id_aa64mmfr1_el1
602 ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
604 msr_s SYS_LORC_EL1, xzr
607 /* Stage-2 translation */
610 cbz x2, install_el2_stub
612 mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
616 SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
618 * When VHE is not in use, early init of EL2 and EL1 needs to be done here.
620 * When VHE _is_ in use, EL1 will not be used in the host and
621 * requires no configuration, and all non-hyp-specific EL2 setup
622 * will be done via the _EL1 system register aliases in __cpu_setup.
624 mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
627 /* Coprocessor traps. */
629 msr cptr_el2, x0 // Disable copro. traps to EL2
631 /* SVE register access */
632 mrs x1, id_aa64pfr0_el1
633 ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
636 bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
637 msr cptr_el2, x0 // Disable copro. traps to EL2
639 mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
640 msr_s SYS_ZCR_EL2, x1 // length for EL1.
642 /* Hypervisor stub */
643 7: adr_l x0, __hyp_stub_vectors
647 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
651 mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
653 SYM_FUNC_END(el2_setup)
656 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
657 * in w0. See arch/arm64/include/asm/virt.h for more info.
659 SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
660 adr_l x1, __boot_cpu_mode
661 cmp w0, #BOOT_CPU_MODE_EL2
664 1: str w0, [x1] // This CPU has booted in EL1
666 dc ivac, x1 // Invalidate potentially stale cache line
668 SYM_FUNC_END(set_cpu_boot_mode_flag)
671 * These values are written with the MMU off, but read with the MMU on.
672 * Writers will invalidate the corresponding address, discarding up to a
673 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
674 * sufficient alignment that the CWG doesn't overlap another section.
676 .pushsection ".mmuoff.data.write", "aw"
678 * We need to find out the CPU boot mode long after boot, so we need to
679 * store it in a writable variable.
681 * This is not in .bss, because we set it sufficiently early that the boot-time
682 * zeroing of .bss would clobber it.
684 SYM_DATA_START(__boot_cpu_mode)
685 .long BOOT_CPU_MODE_EL2
686 .long BOOT_CPU_MODE_EL1
687 SYM_DATA_END(__boot_cpu_mode)
689 * The booting CPU updates the failed status @__early_cpu_boot_status,
690 * with MMU turned off.
692 SYM_DATA_START(__early_cpu_boot_status)
694 SYM_DATA_END(__early_cpu_boot_status)
699 * This provides a "holding pen" for platforms in which all secondary
700 * cores are held until we're ready for them to initialise.
702 SYM_FUNC_START(secondary_holding_pen)
703 bl el2_setup // Drop to EL1, w0=cpu_boot_mode
704 bl set_cpu_boot_mode_flag
706 mov_q x1, MPIDR_HWID_BITMASK
708 adr_l x3, secondary_holding_pen_release
711 b.eq secondary_startup
714 SYM_FUNC_END(secondary_holding_pen)
717 * Secondary entry point that jumps straight into the kernel. Only to
718 * be used where CPUs are brought online dynamically by the kernel.
720 SYM_FUNC_START(secondary_entry)
721 bl el2_setup // Drop to EL1
722 bl set_cpu_boot_mode_flag
724 SYM_FUNC_END(secondary_entry)
726 SYM_FUNC_START_LOCAL(secondary_startup)
728 * Common entry point for secondary CPUs.
730 bl __cpu_secondary_check52bitva
731 bl __cpu_setup // initialise processor
732 adrp x1, swapper_pg_dir
734 ldr x8, =__secondary_switched
736 SYM_FUNC_END(secondary_startup)
738 SYM_FUNC_START_LOCAL(__secondary_switched)
743 adr_l x0, secondary_data
744 ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
745 cbz x1, __secondary_too_slow
747 ldr x2, [x0, #CPU_BOOT_TASK]
748 cbz x2, __secondary_too_slow
754 #ifdef CONFIG_ARM64_PTR_AUTH
755 ptrauth_keys_init_cpu x2, x3, x4, x5
758 b secondary_start_kernel
759 SYM_FUNC_END(__secondary_switched)
761 SYM_FUNC_START_LOCAL(__secondary_too_slow)
764 b __secondary_too_slow
765 SYM_FUNC_END(__secondary_too_slow)
768 * The booting CPU updates the failed status @__early_cpu_boot_status,
769 * with MMU turned off.
771 * update_early_cpu_boot_status status, tmp1, tmp2
772 * - Corrupts tmp1, tmp2
773 * - Writes 'status' to __early_cpu_boot_status and makes sure
774 * it is committed to memory.
777 .macro update_early_cpu_boot_status status, tmp1, tmp2
779 adr_l \tmp1, __early_cpu_boot_status
782 dc ivac, \tmp1 // Invalidate potentially stale cache line
788 * x0 = SCTLR_EL1 value for turning on the MMU.
789 * x1 = TTBR1_EL1 value
791 * Returns to the caller via x30/lr. This requires the caller to be covered
792 * by the .idmap.text section.
794 * Checks if the selected granule size is supported by the CPU.
795 * If it isn't, park the CPU
797 SYM_FUNC_START(__enable_mmu)
798 mrs x2, ID_AA64MMFR0_EL1
799 ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
800 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
801 b.lt __no_granule_support
802 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
803 b.gt __no_granule_support
804 update_early_cpu_boot_status 0, x2, x3
805 adrp x2, idmap_pg_dir
808 msr ttbr0_el1, x2 // load TTBR0
810 msr ttbr1_el1, x1 // load TTBR1
815 * Invalidate the local I-cache so that any instructions fetched
816 * speculatively from the PoC are discarded, since they may have
817 * been dynamically patched at the PoU.
823 SYM_FUNC_END(__enable_mmu)
825 SYM_FUNC_START(__cpu_secondary_check52bitva)
826 #ifdef CONFIG_ARM64_VA_BITS_52
827 ldr_l x0, vabits_actual
831 mrs_s x0, SYS_ID_AA64MMFR2_EL1
832 and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
835 update_early_cpu_boot_status \
836 CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
843 SYM_FUNC_END(__cpu_secondary_check52bitva)
845 SYM_FUNC_START_LOCAL(__no_granule_support)
846 /* Indicate that this CPU can't boot and is stuck in the kernel */
847 update_early_cpu_boot_status \
848 CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
853 SYM_FUNC_END(__no_granule_support)
855 #ifdef CONFIG_RELOCATABLE
856 SYM_FUNC_START_LOCAL(__relocate_kernel)
858 * Iterate over each entry in the relocation table, and apply the
859 * relocations in place.
861 ldr w9, =__rela_offset // offset to reloc table
862 ldr w10, =__rela_size // size of reloc table
864 mov_q x11, KIMAGE_VADDR // default virtual offset
865 add x11, x11, x23 // actual virtual offset
866 add x9, x9, x11 // __va(.rela)
867 add x10, x9, x10 // __va(.rela) + sizeof(.rela)
871 ldp x12, x13, [x9], #24
873 cmp w13, #R_AARCH64_RELATIVE
875 add x14, x14, x23 // relocate
882 * Apply RELR relocations.
884 * RELR is a compressed format for storing relative relocations. The
885 * encoded sequence of entries looks like:
886 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
888 * i.e. start with an address, followed by any number of bitmaps. The
889 * address entry encodes 1 relocation. The subsequent bitmap entries
890 * encode up to 63 relocations each, at subsequent offsets following
891 * the last address entry.
893 * The bitmap entries must have 1 in the least significant bit. The
894 * assumption here is that an address cannot have 1 in lsb. Odd
895 * addresses are not supported. Any odd addresses are stored in the RELA
896 * section, which is handled above.
898 * Excluding the least significant bit in the bitmap, each non-zero
899 * bit in the bitmap represents a relocation to be applied to
900 * a corresponding machine word that follows the base address
901 * word. The second least significant bit represents the machine
902 * word immediately following the initial address, and each bit
903 * that follows represents the next word, in linear order. As such,
904 * a single bitmap can encode up to 63 relocations in a 64-bit object.
906 * In this implementation we store the address of the next RELR table
907 * entry in x9, the address being relocated by the current address or
908 * bitmap entry in x13 and the address being relocated by the current
911 * Because addends are stored in place in the binary, RELR relocations
912 * cannot be applied idempotently. We use x24 to keep track of the
913 * currently applied displacement so that we can correctly relocate if
914 * __relocate_kernel is called twice with non-zero displacements (i.e.
915 * if there is both a physical misalignment and a KASLR displacement).
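/*
 * Worked example (editor's note, not in the original source): the two
 * consecutive RELR entries { 0x1000, 0x0000000000000007 } relocate the
 * word at 0x1000 (the address entry itself) and then, since bits 1 and 2
 * of the bitmap are set (bit 0 is only the bitmap marker), the words at
 * 0x1008 and 0x1010 as well. A further bitmap entry would start covering
 * words at 0x1008 + 63 * 8.
 */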
917 ldr w9, =__relr_offset // offset to reloc table
918 ldr w10, =__relr_size // size of reloc table
919 add x9, x9, x11 // __va(.relr)
920 add x10, x9, x10 // __va(.relr) + sizeof(.relr)
922 sub x15, x23, x24 // delta from previous offset
923 cbz x15, 7f // nothing to do if unchanged
924 mov x24, x23 // save new offset
929 tbnz x11, #0, 3f // branch to handle bitmaps
931 ldr x12, [x13] // relocate address entry
933 str x12, [x13], #8 // adjust to start of bitmap
939 tbz x11, #0, 5f // skip bit if not set
940 ldr x12, [x14] // relocate bit
944 5: add x14, x14, #8 // move to next bit's address
948 * Move to the next bitmap's address. 8 is the word size, and 63 is the
949 * number of significant bits in a bitmap entry.
951 add x13, x13, #(8 * 63)
958 SYM_FUNC_END(__relocate_kernel)
961 SYM_FUNC_START_LOCAL(__primary_switch)
962 #ifdef CONFIG_RANDOMIZE_BASE
963 mov x19, x0 // preserve new SCTLR_EL1 value
964 mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
969 #ifdef CONFIG_RELOCATABLE
971 mov x24, #0 // no RELR displacement yet
974 #ifdef CONFIG_RANDOMIZE_BASE
975 ldr x8, =__primary_switched
976 adrp x0, __PHYS_OFFSET
980 * If we return here, we have a KASLR displacement in x23 which we need
981 * to take into account by discarding the current kernel mapping and
982 * creating a new one.
984 pre_disable_mmu_workaround
985 msr sctlr_el1, x20 // disable the MMU
987 bl __create_page_tables // recreate kernel mapping
989 tlbi vmalle1 // Remove any stale TLB entries
993 msr sctlr_el1, x19 // re-enable the MMU
995 ic iallu // flush instructions fetched
996 dsb nsh // via old mapping
1002 ldr x8, =__primary_switched
1003 adrp x0, __PHYS_OFFSET
1005 SYM_FUNC_END(__primary_switch)