1 /* SPDX-License-Identifier: GPL-2.0-only */
3 /* Copyright (C) 2012 Regents of the University of California */
6 #include <asm/asm-offsets.h>
8 #include <linux/init.h>
9 #include <linux/linkage.h>
10 #include <asm/thread_info.h>
12 #include <asm/pgtable.h>
14 #include <asm/cpu_ops_sbi.h>
15 #include <asm/hwcap.h>
16 #include <asm/image.h>
18 #include <asm/xip_fixup.h>
19 #include "efi-header.S"
22 SYM_CODE_START(_start)
24 * Image header expected by Linux boot-loaders. The image header data
25 * structure is described in asm/image.h.
26 * Do not modify it without modifying the structure and all bootloaders
27 * that expects this header format!!
31 * This instruction decodes to "MZ" ASCII required by UEFI.
36 /* jump to start kernel */
42 #ifdef CONFIG_RISCV_M_MODE
43 /* Image load offset (0MB) from start of RAM for M-mode */
46 #if __riscv_xlen == 64
47 /* Image load offset(2MB) from start of RAM */
50 /* Image load offset(4MB) from start of RAM */
54 /* Effective size of kernel image */
57 .word RISCV_HEADER_VERSION
60 .ascii RISCV_IMAGE_MAGIC
62 .ascii RISCV_IMAGE_MAGIC2
64 .word pe_head_start - _start
74 .global relocate_enable_mmu
76 /* Relocate return address */
79 REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
84 /* Point stvec to virtual address of intruction after satp write */
89 /* Compute satp for kernel page tables, but don't load it yet */
90 srl a2, a0, PAGE_SHIFT
97 * Load trampoline page directory, which will cause us to trap to
98 * stvec if VA != PA, or simply fall through if VA == PA. We need a
99 * full fence here because setup_vm() just wrote these PTEs and we need
100 * to ensure the new translations are in use.
102 la a0, trampoline_pg_dir
104 srl a0, a0, PAGE_SHIFT
110 /* Set trap vector to spin forever to help debug */
111 la a0, .Lsecondary_park
114 /* Reload the global pointer */
118 * Switch to kernel page tables. A full fence is necessary in order to
119 * avoid using the trampoline translations, which are only correct for
120 * the first superpage. Fetching the fence is guaranteed to work
121 * because that first superpage is translated the same way.
127 #endif /* CONFIG_MMU */
129 .global secondary_start_sbi
131 /* Mask all interrupts */
135 /* Load the global pointer */
139 * Disable FPU & VECTOR to detect illegal usage of
140 * floating point or vector in kernel space
145 /* Set trap vector to spin forever to help debug */
146 la a3, .Lsecondary_park
149 /* a0 contains the hartid & a1 contains boot data */
150 li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
154 li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
159 .Lsecondary_start_common:
162 /* Enable virtual memory and relocate to virtual address */
163 la a0, swapper_pg_dir
165 call relocate_enable_mmu
167 call .Lsetup_trap_vector
170 #endif /* CONFIG_SMP */
174 /* Set trap vector to exception handler */
175 la a0, handle_exception
179 * Set sup0 scratch register to 0, indicating to exception vector that
180 * we are presently executing in kernel.
182 csrw CSR_SCRATCH, zero
187 /* We lack SMP support or have too many harts, so park this hart */
193 SYM_CODE_START(_start_kernel)
194 /* Mask all interrupts */
198 #ifdef CONFIG_RISCV_M_MODE
199 /* flush the instruction cache */
202 /* Reset all registers except ra, a0, a1 */
206 * Setup a PMP to permit access to all of memory. Some machines may
207 * not implement PMPs, so we set up a quick trap handler to just skip
208 * touching the PMPs on any trap.
214 csrw CSR_PMPADDR0, a0
215 li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
221 * The hartid in a0 is expected later on, and we have no firmware
225 #endif /* CONFIG_RISCV_M_MODE */
227 /* Load the global pointer */
231 * Disable FPU & VECTOR to detect illegal usage of
232 * floating point or vector in kernel space
237 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
238 li t0, CONFIG_NR_CPUS
239 blt a0, t0, .Lgood_cores
240 tail .Lsecondary_park
243 /* The lottery system is only required for spinwait booting method */
244 #ifndef CONFIG_XIP_KERNEL
245 /* Pick one hart to run the main boot sequence */
248 amoadd.w a3, a2, (a3)
249 bnez a3, .Lsecondary_start
252 /* hart_lottery in flash contains a magic number */
256 XIP_FIXUP_FLASH_OFFSET a3
258 amoswap.w t0, t1, (a2)
259 /* first time here if hart_lottery in RAM is not set */
260 beq t0, t1, .Lsecondary_start
262 #endif /* CONFIG_XIP */
263 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
265 #ifdef CONFIG_XIP_KERNEL
266 la sp, _end + THREAD_SIZE
271 /* Restore a0 copy */
275 #ifndef CONFIG_XIP_KERNEL
276 /* Clear BSS for flat non-ELF images */
279 ble a4, a3, .Lclear_bss_done
282 add a3, a3, RISCV_SZPTR
283 blt a3, a4, .Lclear_bss
286 la a2, boot_cpu_hartid
290 /* Initialize page tables and relocate to virtual addresses */
292 la sp, init_thread_union + THREAD_SIZE
294 addi sp, sp, -PT_SIZE_ON_STACK
296 #ifdef CONFIG_BUILTIN_DTB
301 #endif /* CONFIG_BUILTIN_DTB */
306 call relocate_enable_mmu
307 #endif /* CONFIG_MMU */
309 call .Lsetup_trap_vector
310 /* Restore C environment */
312 la sp, init_thread_union + THREAD_SIZE
313 addi sp, sp, -PT_SIZE_ON_STACK
317 call kasan_early_init
319 /* Start the kernel */
323 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
325 /* Set trap vector to spin forever to help debug */
326 la a3, .Lsecondary_park
330 la a1, __cpu_spinwait_stack_pointer
332 la a2, __cpu_spinwait_task_pointer
338 * This hart didn't win the lottery, so we wait for the winning hart to
339 * get far enough along the boot process that it should continue.
342 /* FIXME: We should WFI to save some energy here. */
345 beqz sp, .Lwait_for_cpu_up
346 beqz tp, .Lwait_for_cpu_up
349 tail .Lsecondary_start_common
350 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
352 SYM_CODE_END(_start_kernel)
354 #ifdef CONFIG_RISCV_M_MODE
355 SYM_CODE_START_LOCAL(reset_regs)
388 andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
389 beqz t0, .Lreset_regs_done_fpu
426 /* note that the caller must clear SR_FS */
427 .Lreset_regs_done_fpu:
428 #endif /* CONFIG_FPU */
430 #ifdef CONFIG_RISCV_ISA_V
432 li t1, COMPAT_HWCAP_ISA_V
434 beqz t0, .Lreset_regs_done_vector
437 * Clear vector registers and reset vcsr
438 * VLMAX has a defined value, VLEN is a constant,
439 * and this form of vsetvli is defined to set vl to VLMAX.
444 vsetvli t1, x0, e8, m8, ta, ma
449 /* note that the caller must clear SR_VS */
450 .Lreset_regs_done_vector:
451 #endif /* CONFIG_RISCV_ISA_V */
453 SYM_CODE_END(reset_regs)
454 #endif /* CONFIG_RISCV_M_MODE */