1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
21 #include <linux/linkage.h>
26 #include <asm/cputable.h>
27 #include <asm/cache.h>
28 #include <asm/thread_info.h>
29 #include <asm/ppc_asm.h>
30 #include <asm/asm-offsets.h>
31 #include <asm/ptrace.h>
33 #include <asm/kvm_book3s_asm.h>
34 #include <asm/export.h>
35 #include <asm/feature-fixups.h>
36 #include <asm/interrupt.h>
/*
 * LOAD_BAT(n, reg, RA, RB):
 * Load BAT pair n (IBATnU/L and DBATnU/L) from a table of 16-byte
 * entries addressed by `reg` (layout per entry: IBATU, IBATL,
 * DBATU, DBATL).  The upper BAT registers are written first so the
 * pair is taken out of service before the new contents are loaded
 * (see the comment for clear_bats()).  Clobbers RA and RB.
 * NOTE(review): this excerpt elides a line before the first mtspr
 * (presumably zeroing RA) -- confirm against the full source.
 */
40 #define LOAD_BAT(n, reg, RA, RB) \
41 /* see the comment for clear_bats() -- Cort */ \
43 mtspr SPRN_IBAT##n##U,RA; \
44 mtspr SPRN_DBAT##n##U,RA; \
45 lwz RA,(n*16)+0(reg); \
46 lwz RB,(n*16)+4(reg); \
47 mtspr SPRN_IBAT##n##U,RA; \
48 mtspr SPRN_IBAT##n##L,RB; \
49 lwz RA,(n*16)+8(reg); \
50 lwz RB,(n*16)+12(reg); \
51 mtspr SPRN_DBAT##n##U,RA; \
52 mtspr SPRN_DBAT##n##L,RB
/*
 * Primary boot entry path.  Visible flow: save the device-tree
 * pointer, do early machine identification, switch the MMU off and
 * clear BATs/TLB, set up any early-debug BATs, relocate the kernel
 * to PHYSICAL_START if it is not already there, then rfi into
 * start_here with address translation enabled.
 */
58 * _start is defined this way because the XCOFF loader in the OpenFirmware
59 * on the powermac expects the entry point to be a procedure descriptor.
63 * These are here for legacy reasons, the kernel used to
64 * need to look like a coff function entry for the pmac
65 * but we're always started by some kind of bootloader now.
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
73 * Enter here with the kernel text, data and bss loaded starting at
74 * 0, running with virtual == physical mapping.
75 * r5 points to the prom entry point (the client interface handler
76 * address). Address translation is turned on, with the prom
77 * managing the hash table. Interrupts are disabled. The stack
78 * pointer (r1) points to just below the end of the half-meg region
79 * from 0x380000 - 0x400000, which is mapped in already.
81 * If we are booted from MacOS via BootX, we enter with the kernel
82 * image loaded somewhere, and the following values in registers:
83 * r3: 'BooX' (0x426f6f58)
84 * r4: virtual address of boot_infos_t
88 * This is jumped to on prep systems right after the kernel is relocated
89 * to its proper place in memory by the boot loader. The expected layout
91 * r3: ptr to residual data
92 * r4: initrd_start or if no initrd then 0
93 * r5: initrd_end - unused if r4 is 0
94 * r6: Start of command line string
95 * r7: End of command line string
97 * This just gets a minimal mmu environment setup so we can call
98 * start_here() to do the real work.
105 * We have to do any OF calls before we map ourselves to KERNELBASE,
106 * because OF may have I/O devices mapped into that area
107 * (particularly on CHRP).
112 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
113 /* find out where we are now */
115 0: mflr r8 /* r8 = runtime addr here */
116 addis r8,r8,(_stext - 0b)@ha
117 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
119 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
121 /* We never return. We also hit that trap if trying to boot
122 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
126 * Check for BootX signature when supporting PowerMac and branch to
127 * appropriate trampoline if it's present
129 #ifdef CONFIG_PPC_PMAC
136 #endif /* CONFIG_PPC_PMAC */
138 1: mr r31,r3 /* save device tree ptr */
142 * early_init() does the early machine identification and does
143 * the necessary low-level setup and clears the BSS
144 * -- Cort <cort@fsmlabs.com>
148 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
149 * the physical address we are running at, returned by early_init()
157 bl load_segment_registers
160 #if defined(CONFIG_BOOTX_TEXT)
163 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
166 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
167 bl setup_usbgecko_bat
171 * Call setup_cpu for CPU 0 and initialize 6xx Idle
175 bl call_setup_cpu /* Call setup_cpu for this CPU */
181 * We need to run with _start at physical address 0.
182 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
183 * the exception vectors at 0 (and therefore this copy
184 * overwrites OF's exception vectors with our own).
185 * The MMU is off at this point.
189 addis r4,r3,KERNELBASE@h /* current address of _start */
190 lis r5,PHYSICAL_START@h
191 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
194 * we now have the 1st 16M of ram mapped with the bats.
195 * prep needs the mmu to be turned on here, but pmac already has it on.
196 * this shouldn't bother the pmac since it just gets turned on again
197 * as we jump to our code at KERNELBASE. -- Cort
198 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
199 * off, and in other cases, we now turn it off before changing BATs above.
/* Build the MSR image for rfi: translation on (IR/DR), recoverable (RI) */
203 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
206 ori r0,r0,start_here@l
208 rfi /* enables MMU */
/*
 * Holding pen for secondary CPUs.  Each secondary announces itself in
 * __secondary_hold_acknowledge, then spins until it is released; its
 * CPU number ends up in r24.  The slots are at fixed low addresses so
 * platform code can poke them.
 */
211 * We need __secondary_hold as a place to hold the other cpus on
212 * an SMP machine, even when we are running a UP kernel.
214 . = 0xc0 /* for prep bootloader */
215 li r3,1 /* MTX only has 1 cpu */
216 .globl __secondary_hold
218 /* tell the master we're here */
219 stw r3,__secondary_hold_acknowledge@l(0)
222 /* wait until we're told to start */
225 /* our cpu # was at addr 0 - go */
226 mr r24,r3 /* cpu # */
230 #endif /* CONFIG_SMP */
232 .globl __secondary_hold_spinloop
233 __secondary_hold_spinloop:
235 .globl __secondary_hold_acknowledge
236 __secondary_hold_acknowledge:
240 /* core99 pmac starts the secondary here by changing the vector, and
241 putting it back to what it was (unknown_async_exception) when done. */
242 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
/*
 * Machine check exception vector.  On CHRP, r1 is temporarily stashed
 * in SPRG_SCRATCH2 while thread.rtas_sp is consulted (non-zero means
 * the check hit inside RTAS and an alternate exception frame must be
 * used -- see the long comment below).
 */
246 * On CHRP, this is complicated by the fact that we could get a
247 * machine check inside RTAS, and we have no guarantee that certain
248 * critical registers will have the values we expect. The set of
249 * registers that might have bad values includes all the GPRs
250 * and all the BATs. We indicate that we are in RTAS by putting
251 * a non-zero value, the address of the exception frame to use,
252 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
253 * and uses its value if it is non-zero.
254 * (Other exception handlers assume that r1 is a valid kernel stack
255 * pointer when we take an exception from supervisor mode.)
258 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
260 #ifdef CONFIG_PPC_CHRP
261 mtspr SPRN_SPRG_SCRATCH2,r1
262 mfspr r1, SPRN_SPRG_THREAD
266 mfspr r1, SPRN_SPRG_SCRATCH2
267 #endif /* CONFIG_PPC_CHRP */
269 7: EXCEPTION_PROLOG_2 0x200 MachineCheck
270 #ifdef CONFIG_PPC_CHRP
274 1: prepare_transfer_to_handler
275 bl machine_check_exception
/*
 * Data storage (DSI) vector.  On 604-class CPUs with a hash MMU,
 * DSISR is checked first: plain hash-table misses are handled in the
 * fast path (.Lhash_page_dsi_cont return point below), while faults
 * flagged by DSISR_BAD_FAULT_32S/DSISR_DABRMATCH fall through to the
 * full prolog and C handler.  r10 is preserved via SPRG_SCRATCH2.
 */
278 /* Data access exception. */
279 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
280 #ifdef CONFIG_PPC_BOOK3S_604
281 BEGIN_MMU_FTR_SECTION
282 mtspr SPRN_SPRG_SCRATCH2,r10
283 mfspr r10, SPRN_SPRG_THREAD
285 mfspr r10, SPRN_DSISR
287 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
288 mfspr r10, SPRN_SPRG_THREAD
290 .Lhash_page_dsi_cont:
293 mfspr r10, SPRN_SPRG_SCRATCH2
296 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
298 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
300 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
301 prepare_transfer_to_handler
/* r5 holds DSISR here; DABR match is routed separately from page faults */
303 andis. r0, r5, DSISR_DABRMATCH@h
/*
 * Instruction storage (ISI) vector.  On 604-class hash-MMU CPUs,
 * SRR1_ISI_NOPT (no PTE found) sends the fault to the software hash
 * path, returning at .Lhash_page_isi_cont; otherwise the relevant
 * SRR1 bits are filtered into r5 and the C handler is invoked.
 * r10/r11 are preserved via SPRG_SCRATCH0/1.
 */
312 /* Instruction access exception. */
313 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
314 mtspr SPRN_SPRG_SCRATCH0,r10
315 mtspr SPRN_SPRG_SCRATCH1,r11
316 mfspr r10, SPRN_SPRG_THREAD
319 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
322 #ifdef CONFIG_PPC_BOOK3S_604
323 BEGIN_MMU_FTR_SECTION
324 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
326 .Lhash_page_isi_cont:
327 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
328 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
330 andi. r11, r11, MSR_PR
333 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
334 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
337 prepare_transfer_to_handler
/*
 * Remaining architected vectors: external interrupt, alignment,
 * program check, FP unavailable, decrementer, system call,
 * single step, plus the perfmon and AltiVec-unavailable traps
 * (the latter two branch to out-of-line handlers further down).
 */
341 /* External interrupt */
342 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
344 /* Alignment exception */
345 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
346 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
347 prepare_transfer_to_handler
348 bl alignment_exception
352 /* Program check exception */
353 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
354 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
355 prepare_transfer_to_handler
356 bl program_check_exception
360 /* Floating-point unavailable */
361 START_EXCEPTION(0x800, FPUnavailable)
362 #ifdef CONFIG_PPC_FPU
365 * Certain Freescale cores don't have a FPU and treat fp instructions
366 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
369 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
370 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
372 bl load_up_fpu /* if from user, just load it up */
373 b fast_exception_return
374 1: prepare_transfer_to_handler
375 bl kernel_fp_unavailable_exception
382 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
384 EXCEPTION(0xa00, Trap_0a, unknown_exception)
385 EXCEPTION(0xb00, Trap_0b, unknown_exception)
388 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
389 SYSCALL_ENTRY INTERRUPT_SYSCALL
391 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
392 EXCEPTION(0xe00, Trap_0e, unknown_exception)
395 * The Altivec unavailable trap is at 0x0f20. Foo.
396 * We effectively remap it to 0x3000.
397 * We include an altivec unavailable exception vector even if
398 * not configured for Altivec, so that you can't panic a
399 * non-altivec kernel running on a machine with altivec just
400 * by executing an altivec instruction.
402 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
405 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
/*
 * 603/603e software instruction TLB reload.  The 603 has no hardware
 * table walk; on a miss it traps here with the alternate GPR set
 * (MSR[TGPR]) active, so r0-r3 are free scratch.  The handler walks
 * the two-level Linux page table, checks permissions, converts the
 * Linux PTE to a hardware PTE and reloads the ITLB; on failure it
 * synthesizes an ISI via SRR1/DSISR and returns with TGPR cleared.
 */
410 * Handle TLB miss for instruction on 603/603e.
411 * Note: we get an alternate set of r0 - r3 to use automatically.
413 . = INTERRUPT_INST_TLB_MISS_603
417 * r1: linux style pte ( later becomes ppc hardware pte )
418 * r2: ptr to linux-style pte
421 /* Get PTE (linux-style) and check access */
423 #ifdef CONFIG_MODULES
424 lis r1, TASK_SIZE@h /* check if kernel address */
428 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
429 rlwinm r2, r2, 28, 0xfffff000
430 #ifdef CONFIG_MODULES
432 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
433 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
434 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
436 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
437 lwz r2,0(r2) /* get pmd entry */
438 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
439 beq- InstructionAddressInvalid /* return if no mapping */
440 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
441 lwz r0,0(r2) /* get linux-style pte */
442 andc. r1,r1,r0 /* check access & ~permission */
443 bne- InstructionAddressInvalid /* return if access not permitted */
444 /* Convert linux-style PTE to low word of PPC-style PTE */
445 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
/* r1 == 0 here: the andc. above fell through only if all required bits set */
446 ori r1, r1, 0xe06 /* clear out reserved bits */
447 andc r1, r0, r1 /* PP = user? 1 : 0 */
449 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
450 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
453 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
456 InstructionAddressInvalid:
458 rlwinm r1,r3,9,6,6 /* Get load/store bit */
461 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
462 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
465 mfspr r1,SPRN_IMISS /* Get failing address */
466 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
467 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
469 mtspr SPRN_DAR,r1 /* Set fault address */
470 mfmsr r0 /* Restore "normal" registers */
471 xoris r0,r0,MSR_TGPR>>16
472 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software data-load TLB reload.  Same page-table walk as
 * the instruction miss above, but with load permissions and with the
 * DTLB software-LRU way-selection hack (SPRG_603_LRU) on CPUs that
 * need it.  On failure, synthesizes a DSI via DAR/DSISR.
 */
477 * Handle TLB miss for DATA Load operation on 603/603e
479 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
483 * r1: linux style pte ( later becomes ppc hardware pte )
484 * r2: ptr to linux-style pte
487 /* Get PTE (linux-style) and check access */
489 lis r1, TASK_SIZE@h /* check if kernel address */
492 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
493 rlwinm r2, r2, 28, 0xfffff000
495 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
496 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
497 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
498 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
499 lwz r2,0(r2) /* get pmd entry */
500 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
501 beq- DataAddressInvalid /* return if no mapping */
502 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
503 lwz r0,0(r2) /* get linux-style pte */
504 andc. r1,r1,r0 /* check access & ~permission */
505 bne- DataAddressInvalid /* return if access not permitted */
506 /* Convert linux-style PTE to low word of PPC-style PTE */
507 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
508 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
509 rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
510 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
511 xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
512 ori r1,r1,0xe04 /* clear out reserved bits */
513 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
515 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
516 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
/* Software LRU: pick the DTLB way to replace via SPRG_603_LRU bitmap */
518 BEGIN_MMU_FTR_SECTION
520 mfspr r1,SPRN_SPRG_603_LRU
521 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
525 mtspr SPRN_SPRG_603_LRU,r1
527 rlwimi r2,r0,31-14,14,14
533 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
537 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
540 rlwinm r1,r3,9,6,6 /* Get load/store bit */
543 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
545 mfspr r1,SPRN_DMISS /* Get failing address */
546 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
547 beq 20f /* Jump if big endian */
549 20: mtspr SPRN_DAR,r1 /* Set fault address */
550 mfmsr r0 /* Restore "normal" registers */
551 xoris r0,r0,MSR_TGPR>>16
552 mtcrf 0x80,r3 /* Restore CR0 */
/*
 * 603/603e software data-store TLB reload.  Like the load-miss path
 * but requires _PAGE_RW and _PAGE_DIRTY up front (a store to a clean
 * or read-only page must fault so the generic code can mark it dirty
 * or reject it).  Shares DataAddressInvalid with the load path.
 */
557 * Handle TLB miss for DATA Store on 603/603e
559 . = INTERRUPT_DATA_STORE_TLB_MISS_603
563 * r1: linux style pte ( later becomes ppc hardware pte )
564 * r2: ptr to linux-style pte
567 /* Get PTE (linux-style) and check access */
569 lis r1, TASK_SIZE@h /* check if kernel address */
572 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
573 rlwinm r2, r2, 28, 0xfffff000
575 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
576 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
577 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
578 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
579 lwz r2,0(r2) /* get pmd entry */
580 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
581 beq- DataAddressInvalid /* return if no mapping */
582 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
583 lwz r0,0(r2) /* get linux-style pte */
584 andc. r1,r1,r0 /* check access & ~permission */
585 bne- DataAddressInvalid /* return if access not permitted */
586 /* Convert linux-style PTE to low word of PPC-style PTE */
587 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
588 li r1,0xe06 /* clear out reserved bits & PP msb */
589 andc r1,r0,r1 /* PP = user? 1: 0 */
591 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
592 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
594 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/* Software LRU: pick the DTLB way to replace via SPRG_603_LRU bitmap */
596 BEGIN_MMU_FTR_SECTION
598 mfspr r1,SPRN_SPRG_603_LRU
599 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
603 mtspr SPRN_SPRG_603_LRU,r1
605 rlwimi r2,r0,31-14,14,14
611 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
615 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/*
 * Default handlers for the remaining (mostly implementation-specific)
 * vectors 0x1300-0x2f00.  When AltiVec / TAU support is not built in,
 * their handlers alias to the generic unknown-exception handlers.
 */
617 #ifndef CONFIG_ALTIVEC
618 #define altivec_assist_exception unknown_exception
621 #ifndef CONFIG_TAU_INT
622 #define TAUException unknown_async_exception
625 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
626 EXCEPTION(0x1400, SMI, SMIException)
627 EXCEPTION(0x1500, Trap_15, unknown_exception)
628 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
629 EXCEPTION(0x1700, Trap_17, TAUException)
630 EXCEPTION(0x1800, Trap_18, unknown_exception)
631 EXCEPTION(0x1900, Trap_19, unknown_exception)
632 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
633 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
634 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
635 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
636 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
637 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
638 EXCEPTION(0x2000, RunMode, RunModeException)
639 EXCEPTION(0x2100, Trap_21, unknown_exception)
640 EXCEPTION(0x2200, Trap_22, unknown_exception)
641 EXCEPTION(0x2300, Trap_23, unknown_exception)
642 EXCEPTION(0x2400, Trap_24, unknown_exception)
643 EXCEPTION(0x2500, Trap_25, unknown_exception)
644 EXCEPTION(0x2600, Trap_26, unknown_exception)
645 EXCEPTION(0x2700, Trap_27, unknown_exception)
646 EXCEPTION(0x2800, Trap_28, unknown_exception)
647 EXCEPTION(0x2900, Trap_29, unknown_exception)
648 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
649 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
650 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
651 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
652 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
653 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
/*
 * 604 hash-fault fast-path glue.  The save/restore macros spill the
 * volatile GPRs (plus LR/CTR) that the out-of-line hash_page call
 * would clobber into per-thread THRn/THLR/THCTR slots, so the DSI/ISI
 * fast paths can call C-ish code and resume at the .Lhash_page_*_cont
 * labels in the vectors above.
 */
658 #ifdef CONFIG_PPC_BOOK3S_604
/* Spill volatile GPRs and LR/CTR into thread_struct scratch slots */
659 .macro save_regs_thread thread
660 stw r0, THR0(\thread)
661 stw r3, THR3(\thread)
662 stw r4, THR4(\thread)
663 stw r5, THR5(\thread)
664 stw r6, THR6(\thread)
665 stw r8, THR8(\thread)
666 stw r9, THR9(\thread)
668 stw r0, THLR(\thread)
670 stw r0, THCTR(\thread)
/* Inverse of save_regs_thread: reload LR/CTR then the GPRs */
673 .macro restore_regs_thread thread
674 lwz r0, THLR(\thread)
676 lwz r0, THCTR(\thread)
678 lwz r0, THR0(\thread)
679 lwz r3, THR3(\thread)
680 lwz r4, THR4(\thread)
681 lwz r5, THR5(\thread)
682 lwz r6, THR6(\thread)
683 lwz r8, THR8(\thread)
684 lwz r9, THR9(\thread)
693 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
695 mfspr r10, SPRN_SPRG_THREAD
696 restore_regs_thread r10
697 b .Lhash_page_dsi_cont
701 mfspr r10, SPRN_SPRG_THREAD
707 mfspr r10, SPRN_SPRG_THREAD
708 restore_regs_thread r10
710 b .Lhash_page_isi_cont
/* Common return point after a successful hash-table reload */
712 .globl fast_hash_page_return
713 fast_hash_page_return:
714 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
715 mfspr r10, SPRN_SPRG_THREAD
716 restore_regs_thread r10
722 mfspr r10, SPRN_SPRG_SCRATCH2
727 mfspr r11, SPRN_SPRG_SCRATCH1
728 mfspr r10, SPRN_SPRG_SCRATCH0
730 #endif /* CONFIG_PPC_BOOK3S_604 */
/*
 * Out-of-line bodies for the relocated traps: vmap-stack overflow,
 * AltiVec unavailable (load the unit for user, else report), and the
 * performance monitor interrupt.
 */
732 #ifdef CONFIG_VMAP_STACK
733 vmap_stack_overflow_exception
738 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
739 #ifdef CONFIG_ALTIVEC
741 bl load_up_altivec /* if from user, just load it up */
742 b fast_exception_return
743 #endif /* CONFIG_ALTIVEC */
744 1: prepare_transfer_to_handler
745 bl altivec_unavailable_exception
750 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
751 prepare_transfer_to_handler
752 bl performance_monitor_exception
/*
 * Kernel relocation: copy the first 0x4000 bytes to PHYSICAL_START,
 * jump into the copy, then copy the remainder.  copy_and_flush
 * copies a cache line at a time, flushing dcache to memory and
 * invalidating the icache line so the copied code is executable.
 */
758 * This code is jumped to from the startup code to copy
759 * the kernel image to physical address PHYSICAL_START.
762 lis r3,PHYSICAL_START@h /* Destination base address */
763 li r6,0 /* Destination offset */
764 li r5,0x4000 /* # bytes of memory to copy */
765 bl copy_and_flush /* copy the first 0x4000 bytes */
766 addi r0,r3,4f@l /* jump to the address of 4f */
767 mtctr r0 /* in copy and do the rest. */
768 bctr /* jump to the copy */
769 4: lis r5,_end-KERNELBASE@h
770 ori r5,r5,_end-KERNELBASE@l
771 bl copy_and_flush /* copy the rest */
775 * Copy routine used to copy the kernel to start at physical address 0
776 * and flush and invalidate the caches as needed.
777 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
778 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
780 _GLOBAL(copy_and_flush)
781 4: li r0,L1_CACHE_BYTES/4
785 3: addi r6,r6,4 /* copy a cache line */
789 dcbst r6,r3 /* write it to memory */
791 icbi r6,r3 /* flush the icache line */
794 sync /* additional sync needed on g4 */
/*
 * Secondary CPU bring-up: platform-specific entry stubs
 * (mpc86xx, pmac) funnel into __secondary_start, which restores CPU
 * setup, locates this CPU's stack via secondary_current, loads the
 * MMU/segment registers and rfi's into start_secondary.
 */
801 .globl __secondary_start_mpc86xx
802 __secondary_start_mpc86xx:
804 stw r3, __secondary_hold_acknowledge@l(0)
805 mr r24, r3 /* cpu # */
808 .globl __secondary_start_pmac_0
809 __secondary_start_pmac_0:
810 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
819 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
820 set to map the 0xf0000000 - 0xffffffff region */
822 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
826 .globl __secondary_start
828 /* Copy some CPU settings from CPU 0 */
829 bl __restore_cpu_setup
833 bl call_setup_cpu /* Call setup_cpu for this CPU */
837 /* get current's stack and current */
838 lis r2,secondary_current@ha
840 lwz r2,secondary_current@l(r2)
842 lwz r1,TASK_STACK(r1)
845 addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
850 /* load up the MMU */
851 bl load_segment_registers
854 /* ptr to phys current thread */
856 addi r4,r4,THREAD /* phys address of our thread_struct */
857 mtspr SPRN_SPRG_THREAD,r4
858 BEGIN_MMU_FTR_SECTION
859 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
860 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
861 rlwinm r4, r4, 4, 0xffff01ff
863 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
865 /* enable MMU and jump to start_secondary */
867 lis r3,start_secondary@h
868 ori r3,r3,start_secondary@l
872 #endif /* CONFIG_SMP */
874 #ifdef CONFIG_KVM_BOOK3S_HANDLER
875 #include "../kvm/book3s_rmhandlers.S"
/*
 * MMU loading helpers.  early_hash_table points SDR1 at the early
 * hash table; load_up_mmu flushes the TLB, loads SDR1 (hash MMU
 * only) and the BATs; load_segment_registers programs all 16 segment
 * registers for context 0 (user segments first, then kernel segments
 * with Kp=1 and, with KUEP, NX on user segments).
 */
879 * Load stuff into the MMU. Intended to be called with
882 SYM_FUNC_START_LOCAL(early_hash_table)
883 sync /* Force all PTE updates to finish */
885 tlbia /* Clear all TLB entries */
886 sync /* wait for tlbia/tlbie to finish */
887 TLBSYNC /* ... on all CPUs */
888 /* Load the SDR1 register (hash table base & size) */
889 lis r6, early_hash - PAGE_OFFSET@h
890 ori r6, r6, 3 /* 256kB table */
893 SYM_FUNC_END(early_hash_table)
895 SYM_FUNC_START_LOCAL(load_up_mmu)
896 sync /* Force all PTE updates to finish */
898 tlbia /* Clear all TLB entries */
899 sync /* wait for tlbia/tlbie to finish */
900 TLBSYNC /* ... on all CPUs */
901 BEGIN_MMU_FTR_SECTION
902 /* Load the SDR1 register (hash table base & size) */
907 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
909 /* Load the BAT registers with the values set up by MMU_init. */
917 BEGIN_MMU_FTR_SECTION
922 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
924 SYM_FUNC_END(load_up_mmu)
926 _GLOBAL(load_segment_registers)
927 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
928 mtctr r0 /* for context 0 */
929 #ifdef CONFIG_PPC_KUEP
930 lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
932 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
936 addi r3, r3, 0x111 /* increment VSID */
937 addis r4, r4, 0x1000 /* address of next segment */
939 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
940 mtctr r0 /* for context 0 */
941 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
942 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
943 oris r3, r3, SR_KP@h /* Kp = 1 */
945 addi r3, r3, 0x111 /* increment VSID */
946 addis r4, r4, 0x1000 /* address of next segment */
/*
 * start_here: first code run with our own vectors.  Sets up the init
 * task's thread pointer and stack, does platform init, drops back to
 * real mode to load SDR1/segments, then rfi's into start_kernel with
 * the MMU on for real.
 */
951 * This is where the main kernel code starts.
956 ori r2,r2,init_task@l
957 /* Set up for using our exception vectors */
958 /* ptr to phys current thread */
960 addi r4,r4,THREAD /* init task's THREAD */
961 mtspr SPRN_SPRG_THREAD,r4
962 BEGIN_MMU_FTR_SECTION
963 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
964 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
965 rlwinm r4, r4, 4, 0xffff01ff
967 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
/* init task's kernel stack, with an initial stack frame reserved */
970 lis r1,init_thread_union@ha
971 addi r1,r1,init_thread_union@l
973 stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
975 * Do early platform-specific initialization,
976 * and set up the MMU.
989 * Go back to running unmapped so we can load up new values
990 * for SDR1 (hash table pointer) and the segment registers
991 * and change to using our exception vectors.
996 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1002 /* Load up the kernel context */
1005 #ifdef CONFIG_BDI_SWITCH
1006 /* Add helper information for the Abatron bdiGDB debugger.
1007 * We do this here because we know the mmu is disabled, and
1008 * will be enabled for real in just a few instructions.
1010 lis r5, abatron_pteptrs@h
1011 ori r5, r5, abatron_pteptrs@l
1012 stw r5, 0xf0(0) /* This must match your Abatron config */
1013 lis r6, swapper_pg_dir@h
1014 ori r6, r6, swapper_pg_dir@l
1017 #endif /* CONFIG_BDI_SWITCH */
1019 /* Now turn on the MMU for real! */
1021 lis r3,start_kernel@h
1022 ori r3,r3,start_kernel@l
/*
 * clear_bats: invalidate all I/D BAT pairs (0-3 always; 4-7 on CPUs
 * with MMU_FTR_USE_HIGH_BATS).  NOTE(review): the instruction that
 * zeroes r10 before these stores is elided in this excerpt -- confirm
 * against the full source.
 */
1028 * An undocumented "feature" of 604e requires that the v bit
1029 * be cleared before changing BAT values.
1031 * Also, newer IBM firmware does not clear bat3 and 4 so
1032 * this makes sure it's done.
1035 SYM_FUNC_START_LOCAL(clear_bats)
1038 mtspr SPRN_DBAT0U,r10
1039 mtspr SPRN_DBAT0L,r10
1040 mtspr SPRN_DBAT1U,r10
1041 mtspr SPRN_DBAT1L,r10
1042 mtspr SPRN_DBAT2U,r10
1043 mtspr SPRN_DBAT2L,r10
1044 mtspr SPRN_DBAT3U,r10
1045 mtspr SPRN_DBAT3L,r10
1046 mtspr SPRN_IBAT0U,r10
1047 mtspr SPRN_IBAT0L,r10
1048 mtspr SPRN_IBAT1U,r10
1049 mtspr SPRN_IBAT1L,r10
1050 mtspr SPRN_IBAT2U,r10
1051 mtspr SPRN_IBAT2L,r10
1052 mtspr SPRN_IBAT3U,r10
1053 mtspr SPRN_IBAT3L,r10
1054 BEGIN_MMU_FTR_SECTION
1055 /* Here's a tweak: at this point, CPU setup have
1056 * not been called yet, so HIGH_BAT_EN may not be
1057 * set in HID0 for the 745x processors. However, it
1058 * seems that doesn't affect our ability to actually
1059 * write to these SPRs.
1061 mtspr SPRN_DBAT4U,r10
1062 mtspr SPRN_DBAT4L,r10
1063 mtspr SPRN_DBAT5U,r10
1064 mtspr SPRN_DBAT5L,r10
1065 mtspr SPRN_DBAT6U,r10
1066 mtspr SPRN_DBAT6L,r10
1067 mtspr SPRN_DBAT7U,r10
1068 mtspr SPRN_DBAT7L,r10
1069 mtspr SPRN_IBAT4U,r10
1070 mtspr SPRN_IBAT4L,r10
1071 mtspr SPRN_IBAT5U,r10
1072 mtspr SPRN_IBAT5L,r10
1073 mtspr SPRN_IBAT6U,r10
1074 mtspr SPRN_IBAT6L,r10
1075 mtspr SPRN_IBAT7U,r10
1076 mtspr SPRN_IBAT7L,r10
1077 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1079 SYM_FUNC_END(clear_bats)
/*
 * update_bats: reload all BAT pairs from a table in r3 (via
 * LOAD_BAT), run with IR/DR off and RI/EE masked so a fault during
 * the switch cannot recurse.  flush_tlbs invalidates the TLB a page
 * at a time; mmu_off drops to real mode (no-op if IR/DR already 0),
 * continuing at __after_mmu_off.
 */
1081 _GLOBAL(update_bats)
1087 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1088 rlwinm r0, r6, 0, ~MSR_RI
1089 rlwinm r0, r0, 0, ~MSR_EE
1100 LOAD_BAT(0, r3, r4, r5)
1101 LOAD_BAT(1, r3, r4, r5)
1102 LOAD_BAT(2, r3, r4, r5)
1103 LOAD_BAT(3, r3, r4, r5)
1104 BEGIN_MMU_FTR_SECTION
1105 LOAD_BAT(4, r3, r4, r5)
1106 LOAD_BAT(5, r3, r4, r5)
1107 LOAD_BAT(6, r3, r4, r5)
1108 LOAD_BAT(7, r3, r4, r5)
1109 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1110 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1116 SYM_FUNC_START_LOCAL(flush_tlbs)
1118 1: addic. r10, r10, -0x1000
1123 SYM_FUNC_END(flush_tlbs)
1125 SYM_FUNC_START_LOCAL(mmu_off)
1126 addi r4, r3, __after_mmu_off - _start
1128 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1137 SYM_FUNC_END(mmu_off)
/*
 * initial_bats: map the first 256M of RAM at PAGE_OFFSET with
 * DBAT0/IBAT0 (WIMG: M=1 on SMP so the mapping is coherent,
 * plain R/W otherwise).
 */
1139 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1140 SYM_FUNC_START_LOCAL(initial_bats)
1141 lis r11,PAGE_OFFSET@h
1144 ori r8,r8,0x12 /* R/W access, M=1 */
1146 ori r8,r8,2 /* R/W access */
1147 #endif /* CONFIG_SMP */
1148 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1150 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1151 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1152 mtspr SPRN_IBAT0L,r8
1153 mtspr SPRN_IBAT0U,r11
1156 SYM_FUNC_END(initial_bats)
/*
 * Early-debug BAT setup: BootX display framebuffer (DBAT3), CPM
 * console (DBAT1), and USB Gecko (DBAT1, uncached+guarded, top 128K
 * of the address space, matching FIX_EARLY_DEBUG_BASE).
 */
1158 #ifdef CONFIG_BOOTX_TEXT
1159 SYM_FUNC_START_LOCAL(setup_disp_bat)
1161 * setup the display bat prepared for us in prom.c
1166 addis r8,r3,disp_BAT@ha
1167 addi r8,r8,disp_BAT@l
1172 mtspr SPRN_DBAT3L,r8
1173 mtspr SPRN_DBAT3U,r11
1175 SYM_FUNC_END(setup_disp_bat)
1176 #endif /* CONFIG_BOOTX_TEXT */
1178 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1179 SYM_FUNC_START_LOCAL(setup_cpm_bat)
1182 mtspr SPRN_DBAT1L, r8
1185 ori r11, r11, (BL_1M << 2) | 2
1186 mtspr SPRN_DBAT1U, r11
1189 SYM_FUNC_END(setup_cpm_bat)
1192 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1193 SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
1194 /* prepare a BAT for early io */
1195 #if defined(CONFIG_GAMECUBE)
1197 #elif defined(CONFIG_WII)
1200 #error Invalid platform for USB Gecko based early debugging.
1203 * The virtual address used must match the virtual address
1204 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1206 lis r11, 0xfffe /* top 128K */
1207 ori r8, r8, 0x002a /* uncached, guarded, rw */
1208 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1209 mtspr SPRN_DBAT1L, r8
1210 mtspr SPRN_DBAT1U, r11
1212 SYM_FUNC_END(setup_usbgecko_bat)