1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012 Regents of the University of California
6 #include <asm/asm-offsets.h>
8 #include <linux/init.h>
9 #include <linux/linkage.h>
10 #include <asm/thread_info.h>
12 #include <asm/pgtable.h>
14 #include <asm/hwcap.h>
15 #include <asm/image.h>
17 #include <asm/xip_fixup.h>
18 #include "efi-header.S"
/*
 * NOTE(review): this file is an elided extract — the embedded numeric
 * prefixes on each line jump, so many original lines are missing.
 * Comments below describe only what the visible lines establish.
 *
 * _start: kernel entry point carrying the RISC-V boot image header
 * consumed by boot loaders and the EFI stub (see asm/image.h and the
 * included efi-header.S).
 */
21 SYM_CODE_START(_start)
23 * Image header expected by Linux boot-loaders. The image header data
24 * structure is described in asm/image.h.
25 * Do not modify it without modifying the structure and all bootloaders
26 * that expect this header format!!
30 * This instruction decodes to "MZ" ASCII required by UEFI.
35 /* jump to start kernel */
41 #ifdef CONFIG_RISCV_M_MODE
42 /* Image load offset (0MB) from start of RAM for M-mode */
45 #if __riscv_xlen == 64
46 /* Image load offset (2MB) from start of RAM on 64-bit */
49 /* Image load offset (4MB) from start of RAM (other xlen branch) */
53 /* Effective size of kernel image */
/* Header version number, as defined in asm/image.h */
56 .word RISCV_HEADER_VERSION
/* Magic identifiers for the RISC-V image format (asm/image.h) */
59 .ascii RISCV_IMAGE_MAGIC
61 .ascii RISCV_IMAGE_MAGIC2
/* Offset from _start to pe_head_start — presumably the PE/COFF header
 * provided by efi-header.S; TODO confirm against that file */
63 .word pe_head_start - _start
/*
 * relocate_enable_mmu: enable paging and continue execution at the
 * kernel's virtual addresses (fragment — several lines are elided).
 * Visible steps: relocate the return address to the virtual mapping,
 * point stvec past the satp write, build satp values from page-table
 * physical addresses, and use trampoline_pg_dir before switching to
 * the final kernel page tables.
 */
73 .global relocate_enable_mmu
75 /* Relocate return address */
78 REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
83 /* Point stvec to virtual address of instruction after satp write */
88 /* Compute satp for kernel page tables, but don't load it yet */
89 srl a2, a0, PAGE_SHIFT /* a2 = PPN field: page-table address >> PAGE_SHIFT */
96 * Load trampoline page directory, which will cause us to trap to
97 * stvec if VA != PA, or simply fall through if VA == PA. We need a
98 * full fence here because setup_vm() just wrote these PTEs and we need
99 * to ensure the new translations are in use.
101 la a0, trampoline_pg_dir
103 srl a0, a0, PAGE_SHIFT /* PPN of trampoline_pg_dir */
109 /* Set trap vector to spin forever to help debug */
110 la a0, .Lsecondary_park
113 /* Reload the global pointer */
117 * Switch to kernel page tables. A full fence is necessary in order to
118 * avoid using the trampoline translations, which are only correct for
119 * the first superpage. Fetching the fence is guaranteed to work
120 * because that first superpage is translated the same way.
/* Matching #ifdef CONFIG_MMU is elided from this extract */
126 #endif /* CONFIG_MMU */
/*
 * secondary_start_sbi: entry point for secondary harts started via the
 * SBI firmware interface (fragment — several lines are elided).
 * a0 = hartid, a1 = pointer to per-hart boot data from which the task
 * and stack pointers are loaded at the SBI_HART_BOOT_* offsets.
 */
128 .global secondary_start_sbi
130 /* Mask all interrupts */
134 /* Load the global pointer */
138 * Disable FPU & VECTOR to detect illegal usage of
139 * floating point or vector in kernel space
144 /* Set trap vector to spin forever to help debug */
145 la a3, .Lsecondary_park
148 /* a0 contains the hartid & a1 contains boot data */
149 li a2, SBI_HART_BOOT_TASK_PTR_OFFSET /* offset of task ptr in boot data */
153 li a3, SBI_HART_BOOT_STACK_PTR_OFFSET /* offset of stack ptr in boot data */
/* Common path for all secondary harts once task/stack are set up */
158 .Lsecondary_start_common:
161 /* Enable virtual memory and relocate to virtual address */
162 la a0, swapper_pg_dir /* kernel page directory */
164 call relocate_enable_mmu
166 call .Lsetup_trap_vector
/* Matching #ifdef CONFIG_SMP is elided from this extract */
169 #endif /* CONFIG_SMP */
/*
 * Trap-vector setup (the .Lsetup_trap_vector label itself is elided):
 * installs handle_exception and zeroes the scratch CSR so the exception
 * path can tell it was entered from kernel mode. The final visible
 * comment introduces .Lsecondary_park (body elided), where surplus
 * harts spin.
 */
173 /* Set trap vector to exception handler */
174 la a0, handle_exception
178 * Set sup0 scratch register to 0, indicating to exception vector that
179 * we are presently executing in kernel.
181 csrw CSR_SCRATCH, zero
186 /* We lack SMP support or have too many harts, so park this hart */
/*
 * _start_kernel: main boot-hart initialization (fragment — several
 * lines are elided). Visible work: mask interrupts; on M-mode builds
 * reset registers and open up a PMP window; optionally run the
 * spinwait boot "lottery" to pick the boot hart; clear BSS; set up an
 * early stack; enable the MMU; install the trap vector; and (per the
 * visible comments) enter the kernel proper.
 */
192 SYM_CODE_START(_start_kernel)
193 /* Mask all interrupts */
197 #ifdef CONFIG_RISCV_M_MODE
198 /* flush the instruction cache */
201 /* Reset all registers except ra, a0, a1 */
205 * Setup a PMP to permit access to all of memory. Some machines may
206 * not implement PMPs, so we set up a quick trap handler to just skip
207 * touching the PMPs on any trap.
213 csrw CSR_PMPADDR0, a0
214 li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) /* full-range RWX NAPOT entry */
220 * The hartid in a0 is expected later on, and we have no firmware
224 #endif /* CONFIG_RISCV_M_MODE */
226 /* Load the global pointer */
230 * Disable FPU & VECTOR to detect illegal usage of
231 * floating point or vector in kernel space
236 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
/* Park any hart whose id is out of the configured CPU range */
237 li t0, CONFIG_NR_CPUS
238 blt a0, t0, .Lgood_cores
239 tail .Lsecondary_park
242 /* The lottery system is only required for spinwait booting method */
243 #ifndef CONFIG_XIP_KERNEL
244 /* Pick one hart to run the main boot sequence */
/* Atomic add: only the hart that sees 0 proceeds as boot hart */
247 amoadd.w a3, a2, (a3)
248 bnez a3, .Lsecondary_start
251 /* hart_lottery in flash contains a magic number */
255 XIP_FIXUP_FLASH_OFFSET a3
257 amoswap.w t0, t1, (a2)
258 /* first time here if hart_lottery in RAM is not set */
259 beq t0, t1, .Lsecondary_start
261 #endif /* CONFIG_XIP_KERNEL */
262 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
264 #ifdef CONFIG_XIP_KERNEL
265 la sp, _end + THREAD_SIZE /* temporary stack just past the image (XIP) */
271 /* Restore a0 & a1 copy */
276 #ifndef CONFIG_XIP_KERNEL
277 /* Clear BSS for flat non-ELF images */
280 ble a4, a3, .Lclear_bss_done
283 add a3, a3, RISCV_SZPTR /* advance one pointer-sized word */
284 blt a3, a4, .Lclear_bss
287 la a2, boot_cpu_hartid
291 /* Initialize page tables and relocate to virtual addresses */
293 la sp, init_thread_union + THREAD_SIZE
295 addi sp, sp, -PT_SIZE_ON_STACK
297 #ifdef CONFIG_BUILTIN_DTB
302 #endif /* CONFIG_BUILTIN_DTB */
307 call relocate_enable_mmu
308 #endif /* CONFIG_MMU */
310 call .Lsetup_trap_vector
311 /* Restore C environment */
313 la sp, init_thread_union + THREAD_SIZE
314 addi sp, sp, -PT_SIZE_ON_STACK
318 call kasan_early_init
320 /* Start the kernel */
324 #ifdef CONFIG_RISCV_BOOT_SPINWAIT
326 /* Set trap vector to spin forever to help debug */
327 la a3, .Lsecondary_park
331 la a1, __cpu_spinwait_stack_pointer
333 la a2, __cpu_spinwait_task_pointer
339 * This hart didn't win the lottery, so we wait for the winning hart to
340 * get far enough along the boot process that it should continue.
343 /* FIXME: We should WFI to save some energy here. */
/* Spin until the boot hart publishes our stack and task pointers */
346 beqz sp, .Lwait_for_cpu_up
347 beqz tp, .Lwait_for_cpu_up
350 tail .Lsecondary_start_common
351 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
353 SYM_CODE_END(_start_kernel)
355 #ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring machine state to a known baseline on M-mode boot
 * (fragment — several lines are elided). Visible logic: skip the FPU
 * clear when the F/D hwcap bits are absent, and skip the vector clear
 * when the V hwcap bit is absent; in both cases the caller must clear
 * the corresponding status field (SR_FS / SR_VS).
 */
356 SYM_CODE_START_LOCAL(reset_regs)
/* t0 &= (F|D) hwcap bits; zero means no FPU to reset */
389 andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
390 beqz t0, .Lreset_regs_done_fpu
427 /* note that the caller must clear SR_FS */
428 .Lreset_regs_done_fpu:
429 #endif /* CONFIG_FPU */
431 #ifdef CONFIG_RISCV_ISA_V
433 li t1, COMPAT_HWCAP_ISA_V
435 beqz t0, .Lreset_regs_done_vector /* no V extension: skip vector reset */
438 * Clear vector registers and reset vcsr
439 * VLMAX has a defined value, VLEN is a constant,
440 * and this form of vsetvli is defined to set vl to VLMAX.
445 vsetvli t1, x0, e8, m8, ta, ma /* vl = VLMAX, SEW=8, LMUL=8 */
450 /* note that the caller must clear SR_VS */
451 .Lreset_regs_done_vector:
452 #endif /* CONFIG_RISCV_ISA_V */
454 SYM_CODE_END(reset_regs)
455 #endif /* CONFIG_RISCV_M_MODE */