/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))

/*
 * cpu_do_suspend - save CPU registers context
 *
 * x0: virtual address of context pointer
 *
 * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>.
 */
SYM_FUNC_START(cpu_do_suspend)
	mrs	x4, contextidr_el1
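	/* Record this CPU's per-CPU offset so that cpu_do_resume can reinstall it */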
	get_this_cpu_offset x12
	stp	x10, x11, [x0, #64]
	stp	x12, x13, [x0, #80]
	/*
	 * Save x18 as it may be used as a platform register, e.g. by shadow
	 * call stack.
	 */
	str	x18, [x0, #96]
	ret
SYM_FUNC_END(cpu_do_suspend)

/*
 * cpu_do_resume - restore CPU register context
 *
 * x0: Address of context pointer
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(cpu_do_resume)
	ldp	x2, x3, [x0]
	ldp	x4, x5, [x0, #16]
	ldp	x6, x8, [x0, #32]
	ldp	x9, x10, [x0, #48]
	ldp	x11, x12, [x0, #64]
	ldp	x13, x14, [x0, #80]
	/*
	 * Restore x18, as it may be used as a platform register, and clear
	 * the buffer to minimize the risk of exposure when used for shadow
	 * call stack.
	 */
	ldr	x18, [x0, #96]
	str	xzr, [x0, #96]
	msr	contextidr_el1, x4
	/* Don't change t0sz here, mask those bits when restoring */
	mrs	x7, tcr_el1
	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
	/*
	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
	 * exception. Mask them until local_daif_restore() in cpu_suspend()
	 * resets them.
	 */
	disable_daif
	msr	mdscr_el1, x10
	set_this_cpu_offset x13
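	/* Only the OS Lock state (OSLSR_EL1.OSLK) can be restored, by writing OSLAR_EL1 */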
	/*
	 * Restore oslsr_el1 by writing oslar_el1
	 */
	ubfx	x11, x11, #1, #1
	msr	oslar_el1, x11
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	reset_amuserenr_el0 x0			// Disable AMU access from EL0

alternative_if ARM64_HAS_RAS_EXTN
	msr_s	SYS_DISR_EL1, xzr
alternative_else_nop_endif
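
	/* x14 holds the saved sp_el0, i.e. the current task pointer, used for the keys */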
	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
	isb
	ret
SYM_FUNC_END(cpu_do_resume)
	.popsection

	.pushsection ".idmap.text", "awx"
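
/*
 * Point TTBR1_EL1 at the empty reserved_pg_dir so that the tables it normally
 * points at can be modified or replaced without risking conflicting TLB
 * entries.
 */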
	.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	save_and_disable_daif flags=x2

	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0
	isb

	restore_daif x2

	ret
SYM_FUNC_END(idmap_cpu_replace_ttbr1)
	.popsection

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0

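/* Attributes for the temporary fixmap entries used to map each page-table page */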
#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

	.pushsection ".idmap.text", "awx"
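
/*
 * Walk one level of the page tables: mark every valid global descriptor
 * non-global (nG), and descend into next-level tables via .Lderef_\type.
 */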
	.macro	kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p]		// Load the entry
	tbz	\type, #0, .Lnext_\type		// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p]		// Update the entry

	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type
	.endif

.Lnext_\type:
	add	cur_\type\()p, cur_\type\()p, #8
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

	/*
	 * Dereference the current table entry and map it into the temporary
	 * fixmap slot associated with the current level.
	 */
	.macro	kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 1)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 1)
	lsr	pte, pte, #12
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 1)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
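/*
 * Synchronization: each secondary increments __idmap_kpti_flag once it has
 * parked on the reserved tables; the boot CPU waits for the count to reach
 * num_cpus, rewrites swapper with nG set everywhere, then clears the flag to
 * release the secondaries.
 */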
255 .pushsection ".data", "aw", %progbits
256 SYM_DATA(__idmap_kpti_flag, .long 1)

SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	temp_pgd_phys	.req	x2

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary
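
	/* Secondary CPUs only signal readiness and wait; the boot CPU does the rewrite */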
	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	pte_flags, #KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

	/* PGD */
	adrp		cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, 0
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
	ret

.Lderef_pgd:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, pgd
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b		.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b		.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b		.Lnext_pmd

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17
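
	/*
	 * With the reserved tables installed, this CPU performs no TTBR1 walks
	 * while the boot CPU rewrites swapper.
	 */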
	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 *	__cpu_setup
 *
 *	Initialise the processor for turning the MMU on.
 *
 *	x0 - actual number of VA bits (ignored unless VA_BITS > 48)
 *
 *	Return in x0 the value of the SCTLR_EL1 register.
 */
	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
	tlbi	vmalle1				// Invalidate local TLB
	dsb	nsh

	mov	x1, #3 << 20
	msr	cpacr_el1, x1			// Enable FP/ASIMD
	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x1			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
	reset_amuserenr_el0 x1			// Disable AMU access from EL0

	/*
	 * Default values for VMSA control registers. These will be adjusted
	 * below depending on detected CPU features.
	 */
	mair	.req	x17
	tcr	.req	x16
	mov_q	mair, MAIR_EL1_SET
	mov_q	tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
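
	/* Some CPU errata require particular TCR bits to be left clear; handle those here */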
	tcr_clear_errata_bits tcr, x9, x5

#ifdef CONFIG_ARM64_VA_BITS_52

	/*
	 * Set the IPS bits in TCR_EL1.
	 */
	tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
	/*
	 * Enable hardware update of the Access Flags bit.
	 * Hardware dirty bit management is enabled later,
	 * via capabilities.
	 */
	mrs	x9, ID_AA64MMFR1_EL1
	and	x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
	cbz	x9, 1f
	orr	tcr, tcr, #TCR_HA		// hardware Access flag update
1:
#endif	/* CONFIG_ARM64_HW_AFDBM */

	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
	ret					// return to head.S

	.unreq	mair
	.unreq	tcr
SYM_FUNC_END(__cpu_setup)