1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
21 #include <linux/linkage.h>
26 #include <asm/cputable.h>
27 #include <asm/cache.h>
28 #include <asm/thread_info.h>
29 #include <asm/ppc_asm.h>
30 #include <asm/asm-offsets.h>
31 #include <asm/ptrace.h>
33 #include <asm/kvm_book3s_asm.h>
34 #include <asm/feature-fixups.h>
35 #include <asm/interrupt.h>
/*
 * LOAD_BAT(n, reg, RA, RB): program I/D BAT pair `n` from a 16-byte
 * table entry at (reg + n*16), using RA and RB as scratch registers.
 * Entry layout: +0 IBAT upper, +4 IBAT lower, +8 DBAT upper,
 * +12 DBAT lower.  The upper (valid-bit-carrying) BAT words are first
 * written with RA to invalidate the entry before the new values are
 * loaded in -- see the comment at clear_bats() for why.
 * NOTE(review): this extract appears to be missing the instruction that
 * initialises RA before the first two mtspr's (presumably li RA,0);
 * confirm against the full source.
 */
39 #define LOAD_BAT(n, reg, RA, RB) \
40 /* see the comment for clear_bats() -- Cort */ \
42 mtspr SPRN_IBAT##n##U,RA; \
43 mtspr SPRN_DBAT##n##U,RA; \
44 lwz RA,(n*16)+0(reg); \
45 lwz RB,(n*16)+4(reg); \
46 mtspr SPRN_IBAT##n##U,RA; \
47 mtspr SPRN_IBAT##n##L,RB; \
48 lwz RA,(n*16)+8(reg); \
49 lwz RB,(n*16)+12(reg); \
50 mtspr SPRN_DBAT##n##U,RA; \
51 mtspr SPRN_DBAT##n##L,RB
57 * _start is defined this way because the XCOFF loader in the OpenFirmware
58 * on the powermac expects the entry point to be a procedure descriptor.
62 * These are here for legacy reasons, the kernel used to
63 * need to look like a coff function entry for the pmac
64 * but we're always started by some kind of bootloader now.
67 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
72 * Enter here with the kernel text, data and bss loaded starting at
73 * 0, running with virtual == physical mapping.
74 * r5 points to the prom entry point (the client interface handler
75 * address). Address translation is turned on, with the prom
76 * managing the hash table. Interrupts are disabled. The stack
77 * pointer (r1) points to just below the end of the half-meg region
78 * from 0x380000 - 0x400000, which is mapped in already.
80 * If we are booted from MacOS via BootX, we enter with the kernel
81 * image loaded somewhere, and the following values in registers:
82 * r3: 'BooX' (0x426f6f58)
83 * r4: virtual address of boot_infos_t
87 * This is jumped to on prep systems right after the kernel is relocated
88 * to its proper place in memory by the boot loader. The expected layout
90 * r3: ptr to residual data
91 * r4: initrd_start or if no initrd then 0
92 * r5: initrd_end - unused if r4 is 0
93 * r6: Start of command line string
94 * r7: End of command line string
96 * This just gets a minimal mmu environment setup so we can call
97 * start_here() to do the real work.
104 * We have to do any OF calls before we map ourselves to KERNELBASE,
105 * because OF may have I/O devices mapped into that area
106 * (particularly on CHRP).
111 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
112 /* find out where we are now */
114 0: mflr r8 /* r8 = runtime addr here */
115 addis r8,r8,(_stext - 0b)@ha
116 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
118 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
120 /* We never return. We also hit that trap if trying to boot
121 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
125 * Check for BootX signature when supporting PowerMac and branch to
126 * appropriate trampoline if it's present
128 #ifdef CONFIG_PPC_PMAC
135 #endif /* CONFIG_PPC_PMAC */
137 1: mr r31,r3 /* save device tree ptr */
141 * early_init() does the early machine identification and does
142 * the necessary low-level setup and clears the BSS
143 * -- Cort <cort@fsmlabs.com>
147 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
156 bl load_segment_registers
159 #if defined(CONFIG_BOOTX_TEXT)
162 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
165 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
166 bl setup_usbgecko_bat
170 * Call setup_cpu for CPU 0 and initialize 6xx Idle
174 bl call_setup_cpu /* Call setup_cpu for this CPU */
180 * We need to run with _start at physical address 0.
181 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
182 * the exception vectors at 0 (and therefore this copy
183 * overwrites OF's exception vectors with our own).
184 * The MMU is off at this point.
188 addis r4,r3,KERNELBASE@h /* current address of _start */
189 lis r5,PHYSICAL_START@h
190 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
193 * we now have the 1st 16M of ram mapped with the bats.
194 * prep needs the mmu to be turned on here, but pmac already has it on.
195 * this shouldn't bother the pmac since it just gets turned on again
196 * as we jump to our code at KERNELBASE. -- Cort
197 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
198 * off, and in other cases, we now turn it off before changing BATs above.
202 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
205 ori r0,r0,start_here@l
207 rfi /* enables MMU */
210 * We need __secondary_hold as a place to hold the other cpus on
211 * an SMP machine, even when we are running a UP kernel.
213 . = 0xc0 /* for prep bootloader */
214 li r3,1 /* MTX only has 1 cpu */
215 .globl __secondary_hold
217 /* tell the master we're here */
218 stw r3,__secondary_hold_acknowledge@l(0)
221 /* wait until we're told to start */
224 /* our cpu # was at addr 0 - go */
225 mr r24,r3 /* cpu # */
229 #endif /* CONFIG_SMP */
231 .globl __secondary_hold_spinloop
232 __secondary_hold_spinloop:
234 .globl __secondary_hold_acknowledge
235 __secondary_hold_acknowledge:
239 /* core99 pmac starts the secondary here by changing the vector, and
240 putting it back to what it was (unknown_async_exception) when done. */
241 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
245 * On CHRP, this is complicated by the fact that we could get a
246 * machine check inside RTAS, and we have no guarantee that certain
247 * critical registers will have the values we expect. The set of
248 * registers that might have bad values includes all the GPRs
249 * and all the BATs. We indicate that we are in RTAS by putting
250 * a non-zero value, the address of the exception frame to use,
251 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
252 * and uses its value if it is non-zero.
253 * (Other exception handlers assume that r1 is a valid kernel stack
254 * pointer when we take an exception from supervisor mode.)
257 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
259 #ifdef CONFIG_PPC_CHRP
260 mtspr SPRN_SPRG_SCRATCH2,r1
261 mfspr r1, SPRN_SPRG_THREAD
265 mfspr r1, SPRN_SPRG_SCRATCH2
266 #endif /* CONFIG_PPC_CHRP */
268 7: EXCEPTION_PROLOG_2 0x200 MachineCheck
269 #ifdef CONFIG_PPC_CHRP
273 1: prepare_transfer_to_handler
274 bl machine_check_exception
277 /* Data access exception. */
278 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
279 #ifdef CONFIG_PPC_BOOK3S_604
280 BEGIN_MMU_FTR_SECTION
281 mtspr SPRN_SPRG_SCRATCH2,r10
282 mfspr r10, SPRN_SPRG_THREAD
284 mfspr r10, SPRN_DSISR
286 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
287 mfspr r10, SPRN_SPRG_THREAD
289 .Lhash_page_dsi_cont:
292 mfspr r10, SPRN_SPRG_SCRATCH2
295 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
297 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
299 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
300 prepare_transfer_to_handler
302 andis. r0, r5, DSISR_DABRMATCH@h
311 /* Instruction access exception. */
312 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
313 mtspr SPRN_SPRG_SCRATCH0,r10
314 mtspr SPRN_SPRG_SCRATCH1,r11
315 mfspr r10, SPRN_SPRG_THREAD
318 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
321 #ifdef CONFIG_PPC_BOOK3S_604
322 BEGIN_MMU_FTR_SECTION
323 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
325 .Lhash_page_isi_cont:
326 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
327 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
329 andi. r11, r11, MSR_PR
332 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
333 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
336 prepare_transfer_to_handler
340 /* External interrupt */
341 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
343 /* Alignment exception */
344 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
345 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
346 prepare_transfer_to_handler
347 bl alignment_exception
351 /* Program check exception */
352 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
353 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
354 prepare_transfer_to_handler
355 bl program_check_exception
359 /* Floating-point unavailable */
360 START_EXCEPTION(0x800, FPUnavailable)
361 #ifdef CONFIG_PPC_FPU
364 * Certain Freescale cores don't have a FPU and treat fp instructions
365 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
368 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
369 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
371 bl load_up_fpu /* if from user, just load it up */
372 b fast_exception_return
373 1: prepare_transfer_to_handler
374 bl kernel_fp_unavailable_exception
381 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
383 EXCEPTION(0xa00, Trap_0a, unknown_exception)
384 EXCEPTION(0xb00, Trap_0b, unknown_exception)
387 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
388 SYSCALL_ENTRY INTERRUPT_SYSCALL
390 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
391 EXCEPTION(0xe00, Trap_0e, unknown_exception)
394 * The Altivec unavailable trap is at 0x0f20. Foo.
395 * We effectively remap it to 0x3000.
396 * We include an altivec unavailable exception vector even if
397 * not configured for Altivec, so that you can't panic a
398 * non-altivec kernel running on a machine with altivec just
399 * by executing an altivec instruction.
401 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
404 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
409 * Handle TLB miss for instruction on 603/603e.
410 * Note: we get an alternate set of r0 - r3 to use automatically.
412 . = INTERRUPT_INST_TLB_MISS_603
415 * r0: userspace flag (later scratch)
416 * r1: linux style pte ( later becomes ppc hardware pte )
417 * r2: ptr to linux-style pte
420 /* Get PTE (linux-style) and check access */
422 #ifdef CONFIG_MODULES
423 lis r1, TASK_SIZE@h /* check if kernel address */
427 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
428 rlwinm r2, r2, 28, 0xfffff000
429 #ifdef CONFIG_MODULES
432 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
434 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
436 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
437 lwz r2,0(r2) /* get pmd entry */
438 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
439 beq- InstructionAddressInvalid /* return if no mapping */
440 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
441 lwz r2,0(r2) /* get linux-style pte */
442 andc. r1,r1,r2 /* check access & ~permission */
443 bne- InstructionAddressInvalid /* return if access not permitted */
444 /* Convert linux-style PTE to low word of PPC-style PTE */
445 #ifdef CONFIG_MODULES
446 rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
448 ori r1, r1, 0xe06 /* clear out reserved bits */
449 andc r1, r2, r1 /* PP = user? 1 : 0 */
451 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
452 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
455 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
458 InstructionAddressInvalid:
460 rlwinm r1,r3,9,6,6 /* Get load/store bit */
463 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
464 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
467 mfspr r1,SPRN_IMISS /* Get failing address */
468 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
469 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
471 mtspr SPRN_DAR,r1 /* Set fault address */
472 mfmsr r0 /* Restore "normal" registers */
473 xoris r0,r0,MSR_TGPR>>16
474 mtcrf 0x80,r3 /* Restore CR0 */
479 * Handle TLB miss for DATA Load operation on 603/603e
481 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
484 * r0: userspace flag (later scratch)
485 * r1: linux style pte ( later becomes ppc hardware pte )
486 * r2: ptr to linux-style pte
489 /* Get PTE (linux-style) and check access */
491 lis r1, TASK_SIZE@h /* check if kernel address */
494 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
495 rlwinm r2, r2, 28, 0xfffff000
498 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
500 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
501 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
502 lwz r2,0(r2) /* get pmd entry */
503 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
504 beq- DataAddressInvalid /* return if no mapping */
505 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
506 lwz r2,0(r2) /* get linux-style pte */
507 andc. r1,r1,r2 /* check access & ~permission */
508 bne- DataAddressInvalid /* return if access not permitted */
509 /* Convert linux-style PTE to low word of PPC-style PTE */
510 rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
511 rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
512 rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
513 xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
514 ori r1,r1,0xe04 /* clear out reserved bits */
515 andc r1,r2,r1 /* PP = user? rw? 1: 3: 0 */
517 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
518 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
520 BEGIN_MMU_FTR_SECTION
522 mfspr r1,SPRN_SPRG_603_LRU
523 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
527 mtspr SPRN_SPRG_603_LRU,r1
529 rlwimi r2,r0,31-14,14,14
535 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
539 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
542 rlwinm r1,r3,9,6,6 /* Get load/store bit */
545 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
547 mfspr r1,SPRN_DMISS /* Get failing address */
548 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
549 beq 20f /* Jump if big endian */
551 20: mtspr SPRN_DAR,r1 /* Set fault address */
552 mfmsr r0 /* Restore "normal" registers */
553 xoris r0,r0,MSR_TGPR>>16
554 mtcrf 0x80,r3 /* Restore CR0 */
559 * Handle TLB miss for DATA Store on 603/603e
561 . = INTERRUPT_DATA_STORE_TLB_MISS_603
564 * r0: userspace flag (later scratch)
565 * r1: linux style pte ( later becomes ppc hardware pte )
566 * r2: ptr to linux-style pte
569 /* Get PTE (linux-style) and check access */
571 lis r1, TASK_SIZE@h /* check if kernel address */
574 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
575 rlwinm r2, r2, 28, 0xfffff000
578 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
580 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
581 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
582 lwz r2,0(r2) /* get pmd entry */
583 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
584 beq- DataAddressInvalid /* return if no mapping */
585 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
586 lwz r2,0(r2) /* get linux-style pte */
587 andc. r1,r1,r2 /* check access & ~permission */
588 bne- DataAddressInvalid /* return if access not permitted */
589 /* Convert linux-style PTE to low word of PPC-style PTE */
590 rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
591 li r1,0xe06 /* clear out reserved bits & PP msb */
592 andc r1,r2,r1 /* PP = user? 1: 0 */
594 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
595 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
597 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
599 BEGIN_MMU_FTR_SECTION
601 mfspr r1,SPRN_SPRG_603_LRU
602 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
606 mtspr SPRN_SPRG_603_LRU,r1
608 rlwimi r2,r0,31-14,14,14
614 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
618 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
620 #ifndef CONFIG_ALTIVEC
621 #define altivec_assist_exception unknown_exception
624 #ifndef CONFIG_TAU_INT
625 #define TAUException unknown_async_exception
/*
 * Fixed-offset exception vector stubs 0x1300-0x2f00.  Each EXCEPTION()
 * invocation plants a vector at the given physical offset that enters
 * the named stub and transfers to the given handler.  Reserved/unused
 * vectors all route to unknown_exception; 0x1600 (altivec assist) and
 * 0x1700 (thermal) fall back to unknown handlers when CONFIG_ALTIVEC /
 * CONFIG_TAU_INT are not set (see the #defines just above this table).
 */
628 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
629 EXCEPTION(0x1400, SMI, SMIException)
630 EXCEPTION(0x1500, Trap_15, unknown_exception)
631 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
632 EXCEPTION(0x1700, Trap_17, TAUException)
633 EXCEPTION(0x1800, Trap_18, unknown_exception)
634 EXCEPTION(0x1900, Trap_19, unknown_exception)
635 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
636 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
637 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
638 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
639 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
640 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
641 EXCEPTION(0x2000, RunMode, RunModeException)
642 EXCEPTION(0x2100, Trap_21, unknown_exception)
643 EXCEPTION(0x2200, Trap_22, unknown_exception)
644 EXCEPTION(0x2300, Trap_23, unknown_exception)
645 EXCEPTION(0x2400, Trap_24, unknown_exception)
646 EXCEPTION(0x2500, Trap_25, unknown_exception)
647 EXCEPTION(0x2600, Trap_26, unknown_exception)
648 EXCEPTION(0x2700, Trap_27, unknown_exception)
649 EXCEPTION(0x2800, Trap_28, unknown_exception)
650 EXCEPTION(0x2900, Trap_29, unknown_exception)
651 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
652 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
653 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
654 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
655 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
656 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
661 #ifdef CONFIG_PPC_BOOK3S_604
/*
 * save_regs_thread: spill the caller-volatile GPRs used by the hash
 * fault path (r0, r3-r6, r8, r9) plus LR and CTR into the THR* slots of
 * the thread_struct pointed to by \thread, so C code can be called from
 * an exception fast path and the registers restored afterwards by
 * restore_regs_thread.
 * NOTE(review): the mflr/mfctr instructions that move LR/CTR into r0
 * before the THLR/THCTR stores, and the closing .endm, appear to be
 * missing from this extract -- confirm against the full source.
 */
662 .macro save_regs_thread thread
663 stw r0, THR0(\thread)
664 stw r3, THR3(\thread)
665 stw r4, THR4(\thread)
666 stw r5, THR5(\thread)
667 stw r6, THR6(\thread)
668 stw r8, THR8(\thread)
669 stw r9, THR9(\thread)
671 stw r0, THLR(\thread)
673 stw r0, THCTR(\thread)
/*
 * restore_regs_thread: inverse of save_regs_thread -- reload LR, CTR
 * and the spilled GPRs (r0, r3-r6, r8, r9) from the THR* slots of the
 * thread_struct pointed to by \thread.  r0 is restored last because it
 * is used as scratch for the THLR/THCTR reloads.
 * NOTE(review): the mtlr/mtctr instructions that move r0 back into
 * LR/CTR, and the closing .endm, appear to be missing from this
 * extract -- confirm against the full source.
 */
676 .macro restore_regs_thread thread
677 lwz r0, THLR(\thread)
679 lwz r0, THCTR(\thread)
681 lwz r0, THR0(\thread)
682 lwz r3, THR3(\thread)
683 lwz r4, THR4(\thread)
684 lwz r5, THR5(\thread)
685 lwz r6, THR6(\thread)
686 lwz r8, THR8(\thread)
687 lwz r9, THR9(\thread)
696 rlwinm r3, r3, 32 - 15, _PAGE_WRITE /* DSISR_STORE -> _PAGE_WRITE */
697 ori r3, r3, _PAGE_PRESENT | _PAGE_READ
699 mfspr r10, SPRN_SPRG_THREAD
700 restore_regs_thread r10
701 b .Lhash_page_dsi_cont
705 mfspr r10, SPRN_SPRG_THREAD
707 li r3, _PAGE_PRESENT | _PAGE_EXEC
711 mfspr r10, SPRN_SPRG_THREAD
712 restore_regs_thread r10
714 b .Lhash_page_isi_cont
716 .globl fast_hash_page_return
717 fast_hash_page_return:
718 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
719 mfspr r10, SPRN_SPRG_THREAD
720 restore_regs_thread r10
726 mfspr r10, SPRN_SPRG_SCRATCH2
731 mfspr r11, SPRN_SPRG_SCRATCH1
732 mfspr r10, SPRN_SPRG_SCRATCH0
734 #endif /* CONFIG_PPC_BOOK3S_604 */
736 #ifdef CONFIG_VMAP_STACK
737 vmap_stack_overflow_exception
742 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
743 #ifdef CONFIG_ALTIVEC
745 bl load_up_altivec /* if from user, just load it up */
746 b fast_exception_return
747 #endif /* CONFIG_ALTIVEC */
748 1: prepare_transfer_to_handler
749 bl altivec_unavailable_exception
754 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
755 prepare_transfer_to_handler
756 bl performance_monitor_exception
762 * This code is jumped to from the startup code to copy
763 * the kernel image to physical address PHYSICAL_START.
766 lis r3,PHYSICAL_START@h /* Destination base address */
767 li r6,0 /* Destination offset */
768 li r5,0x4000 /* # bytes of memory to copy */
769 bl copy_and_flush /* copy the first 0x4000 bytes */
770 addi r0,r3,4f@l /* jump to the address of 4f */
771 mtctr r0 /* in copy and do the rest. */
772 bctr /* jump to the copy */
773 4: lis r5,_end-KERNELBASE@h
774 ori r5,r5,_end-KERNELBASE@l
775 bl copy_and_flush /* copy the rest */
779 * Copy routine used to copy the kernel to start at physical address 0
780 * and flush and invalidate the caches as needed.
781 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
782 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
784 _GLOBAL(copy_and_flush)
787 4: li r0,L1_CACHE_BYTES/4
789 3: addi r6,r6,4 /* copy a cache line */
793 dcbst r6,r3 /* write it to memory */
795 icbi r6,r3 /* flush the icache line */
798 sync /* additional sync needed on g4 */
805 .globl __secondary_start_mpc86xx
806 __secondary_start_mpc86xx:
808 stw r3, __secondary_hold_acknowledge@l(0)
809 mr r24, r3 /* cpu # */
812 .globl __secondary_start_pmac_0
813 __secondary_start_pmac_0:
814 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
823 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
824 set to map the 0xf0000000 - 0xffffffff region */
826 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
830 .globl __secondary_start
832 /* Copy some CPU settings from CPU 0 */
833 bl __restore_cpu_setup
837 bl call_setup_cpu /* Call setup_cpu for this CPU */
841 /* get current's stack and current */
842 lis r2,secondary_current@ha
844 lwz r2,secondary_current@l(r2)
846 lwz r1,TASK_STACK(r1)
849 addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
854 /* load up the MMU */
855 bl load_segment_registers
858 /* ptr to phys current thread */
860 addi r4,r4,THREAD /* phys address of our thread_struct */
861 mtspr SPRN_SPRG_THREAD,r4
862 BEGIN_MMU_FTR_SECTION
863 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
864 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
865 rlwinm r4, r4, 4, 0xffff01ff
867 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
869 /* enable MMU and jump to start_secondary */
871 lis r3,start_secondary@h
872 ori r3,r3,start_secondary@l
876 #endif /* CONFIG_SMP */
878 #ifdef CONFIG_KVM_BOOK3S_HANDLER
879 #include "../kvm/book3s_rmhandlers.S"
883 * Load stuff into the MMU. Intended to be called with
886 SYM_FUNC_START_LOCAL(early_hash_table)
887 sync /* Force all PTE updates to finish */
889 tlbia /* Clear all TLB entries */
890 sync /* wait for tlbia/tlbie to finish */
891 TLBSYNC /* ... on all CPUs */
892 /* Load the SDR1 register (hash table base & size) */
893 lis r6, early_hash - PAGE_OFFSET@h
894 ori r6, r6, 3 /* 256kB table */
897 SYM_FUNC_END(early_hash_table)
899 SYM_FUNC_START_LOCAL(load_up_mmu)
900 sync /* Force all PTE updates to finish */
902 tlbia /* Clear all TLB entries */
903 sync /* wait for tlbia/tlbie to finish */
904 TLBSYNC /* ... on all CPUs */
905 BEGIN_MMU_FTR_SECTION
906 /* Load the SDR1 register (hash table base & size) */
911 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
913 /* Load the BAT registers with the values set up by MMU_init. */
921 BEGIN_MMU_FTR_SECTION
926 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
928 SYM_FUNC_END(load_up_mmu)
930 _GLOBAL(load_segment_registers)
931 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
932 mtctr r0 /* for context 0 */
933 #ifdef CONFIG_PPC_KUEP
934 lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
936 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
940 addi r3, r3, 0x111 /* increment VSID */
941 addis r4, r4, 0x1000 /* address of next segment */
943 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
944 mtctr r0 /* for context 0 */
945 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
946 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
947 oris r3, r3, SR_KP@h /* Kp = 1 */
949 addi r3, r3, 0x111 /* increment VSID */
950 addis r4, r4, 0x1000 /* address of next segment */
955 * This is where the main kernel code starts.
960 ori r2,r2,init_task@l
961 /* Set up for using our exception vectors */
962 /* ptr to phys current thread */
964 addi r4,r4,THREAD /* init task's THREAD */
965 mtspr SPRN_SPRG_THREAD,r4
966 BEGIN_MMU_FTR_SECTION
967 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
968 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
969 rlwinm r4, r4, 4, 0xffff01ff
971 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
974 lis r1,init_thread_union@ha
975 addi r1,r1,init_thread_union@l
977 stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
979 * Do early platform-specific initialization,
980 * and set up the MMU.
993 * Go back to running unmapped so we can load up new values
994 * for SDR1 (hash table pointer) and the segment registers
995 * and change to using our exception vectors.
1000 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1006 /* Load up the kernel context */
1009 #ifdef CONFIG_BDI_SWITCH
1010 /* Add helper information for the Abatron bdiGDB debugger.
1011 * We do this here because we know the mmu is disabled, and
1012 * will be enabled for real in just a few instructions.
1014 lis r5, abatron_pteptrs@h
1015 ori r5, r5, abatron_pteptrs@l
1016 stw r5, 0xf0(0) /* This must match your Abatron config */
1017 lis r6, swapper_pg_dir@h
1018 ori r6, r6, swapper_pg_dir@l
1021 #endif /* CONFIG_BDI_SWITCH */
1023 /* Now turn on the MMU for real! */
1025 lis r3,start_kernel@h
1026 ori r3,r3,start_kernel@l
1032 * An undocumented "feature" of 604e requires that the v bit
1033 * be cleared before changing BAT values.
1035 * Also, newer IBM firmware does not clear bat3 and 4 so
1036 * this makes sure it's done.
/*
 * clear_bats: write r10 into the upper and lower words of every BAT
 * register, clearing all I/D BAT mappings (the valid bits live in the
 * upper words).  The high BATs 4-7 are only touched on CPUs with
 * MMU_FTR_USE_HIGH_BATS.
 * NOTE(review): the instruction that zeroes r10 before the stores
 * (presumably li r10,0) and the final blr appear to be missing from
 * this extract -- confirm against the full source.
 */
1039 SYM_FUNC_START_LOCAL(clear_bats)
/* Invalidate the four architected data BAT pairs */
1042 mtspr SPRN_DBAT0U,r10
1043 mtspr SPRN_DBAT0L,r10
1044 mtspr SPRN_DBAT1U,r10
1045 mtspr SPRN_DBAT1L,r10
1046 mtspr SPRN_DBAT2U,r10
1047 mtspr SPRN_DBAT2L,r10
1048 mtspr SPRN_DBAT3U,r10
1049 mtspr SPRN_DBAT3L,r10
/* ... and the four architected instruction BAT pairs */
1050 mtspr SPRN_IBAT0U,r10
1051 mtspr SPRN_IBAT0L,r10
1052 mtspr SPRN_IBAT1U,r10
1053 mtspr SPRN_IBAT1L,r10
1054 mtspr SPRN_IBAT2U,r10
1055 mtspr SPRN_IBAT2L,r10
1056 mtspr SPRN_IBAT3U,r10
1057 mtspr SPRN_IBAT3L,r10
1058 BEGIN_MMU_FTR_SECTION
1059 /* Here's a tweak: at this point, CPU setup have
1060 * not been called yet, so HIGH_BAT_EN may not be
1061 * set in HID0 for the 745x processors. However, it
1062 * seems that doesn't affect our ability to actually
1063 * write to these SPRs.
*/
1065 mtspr SPRN_DBAT4U,r10
1066 mtspr SPRN_DBAT4L,r10
1067 mtspr SPRN_DBAT5U,r10
1068 mtspr SPRN_DBAT5L,r10
1069 mtspr SPRN_DBAT6U,r10
1070 mtspr SPRN_DBAT6L,r10
1071 mtspr SPRN_DBAT7U,r10
1072 mtspr SPRN_DBAT7L,r10
1073 mtspr SPRN_IBAT4U,r10
1074 mtspr SPRN_IBAT4L,r10
1075 mtspr SPRN_IBAT5U,r10
1076 mtspr SPRN_IBAT5L,r10
1077 mtspr SPRN_IBAT6U,r10
1078 mtspr SPRN_IBAT6L,r10
1079 mtspr SPRN_IBAT7U,r10
1080 mtspr SPRN_IBAT7L,r10
1081 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1083 SYM_FUNC_END(clear_bats)
1085 _GLOBAL(update_bats)
1091 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1092 rlwinm r0, r6, 0, ~MSR_RI
1093 rlwinm r0, r0, 0, ~MSR_EE
1104 LOAD_BAT(0, r3, r4, r5)
1105 LOAD_BAT(1, r3, r4, r5)
1106 LOAD_BAT(2, r3, r4, r5)
1107 LOAD_BAT(3, r3, r4, r5)
1108 BEGIN_MMU_FTR_SECTION
1109 LOAD_BAT(4, r3, r4, r5)
1110 LOAD_BAT(5, r3, r4, r5)
1111 LOAD_BAT(6, r3, r4, r5)
1112 LOAD_BAT(7, r3, r4, r5)
1113 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1114 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1120 SYM_FUNC_START_LOCAL(flush_tlbs)
1122 1: addic. r10, r10, -0x1000
1127 SYM_FUNC_END(flush_tlbs)
1129 SYM_FUNC_START_LOCAL(mmu_off)
1130 addi r4, r3, __after_mmu_off - _start
1132 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1141 SYM_FUNC_END(mmu_off)
1143 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1144 SYM_FUNC_START_LOCAL(initial_bats)
1145 lis r11,PAGE_OFFSET@h
1148 ori r8,r8,0x12 /* R/W access, M=1 */
1150 ori r8,r8,2 /* R/W access */
1151 #endif /* CONFIG_SMP */
1152 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1154 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1155 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1156 mtspr SPRN_IBAT0L,r8
1157 mtspr SPRN_IBAT0U,r11
1160 SYM_FUNC_END(initial_bats)
1162 #ifdef CONFIG_BOOTX_TEXT
1163 SYM_FUNC_START_LOCAL(setup_disp_bat)
1165 * setup the display bat prepared for us in prom.c
1170 addis r8,r3,disp_BAT@ha
1171 addi r8,r8,disp_BAT@l
1176 mtspr SPRN_DBAT3L,r8
1177 mtspr SPRN_DBAT3U,r11
1179 SYM_FUNC_END(setup_disp_bat)
1180 #endif /* CONFIG_BOOTX_TEXT */
1182 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1183 SYM_FUNC_START_LOCAL(setup_cpm_bat)
1186 mtspr SPRN_DBAT1L, r8
1189 ori r11, r11, (BL_1M << 2) | 2
1190 mtspr SPRN_DBAT1U, r11
1193 SYM_FUNC_END(setup_cpm_bat)
1196 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1197 SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
1198 /* prepare a BAT for early io */
1199 #if defined(CONFIG_GAMECUBE)
1201 #elif defined(CONFIG_WII)
1204 #error Invalid platform for USB Gecko based early debugging.
1207 * The virtual address used must match the virtual address
1208 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1210 lis r11, 0xfffe /* top 128K */
1211 ori r8, r8, 0x002a /* uncached, guarded, rw */
1212 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1213 mtspr SPRN_DBAT1L, r8
1214 mtspr SPRN_DBAT1U, r11
1216 SYM_FUNC_END(setup_usbgecko_bat)