GNU Linux-libre 4.9.333-gnu1
[releases.git] / arch / openrisc / kernel / head.S
1 /*
2  * OpenRISC head.S
3  *
4  * Linux architectural port borrowing liberally from similar works of
5  * others.  All original copyrights apply as per the original source
6  * declaration.
7  *
8  * Modifications for the OpenRISC architecture:
9  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11  *
12  *      This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17
18 #include <linux/linkage.h>
19 #include <linux/threads.h>
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/serial_reg.h>
23 #include <asm/processor.h>
24 #include <asm/page.h>
25 #include <asm/mmu.h>
26 #include <asm/pgtable.h>
27 #include <asm/cache.h>
28 #include <asm/spr_defs.h>
29 #include <asm/asm-offsets.h>
30 #include <linux/of_fdt.h>
31
32 #define tophys(rd,rs)                           \
33         l.movhi rd,hi(-KERNELBASE)              ;\
34         l.add   rd,rd,rs
35
36 #define CLEAR_GPR(gpr)                          \
37         l.or    gpr,r0,r0
38
39 #define LOAD_SYMBOL_2_GPR(gpr,symbol)           \
40         l.movhi gpr,hi(symbol)                  ;\
41         l.ori   gpr,gpr,lo(symbol)
42
43
44 #define UART_BASE_ADD      0x90000000
45
46 #define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
47 #define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
48
49 /* ============================================[ tmp store locations ]=== */
50
51 /*
52  * emergency_print temporary stores
53  */
54 #define EMERGENCY_PRINT_STORE_GPR4      l.sw    0x20(r0),r4
55 #define EMERGENCY_PRINT_LOAD_GPR4       l.lwz   r4,0x20(r0)
56
57 #define EMERGENCY_PRINT_STORE_GPR5      l.sw    0x24(r0),r5
58 #define EMERGENCY_PRINT_LOAD_GPR5       l.lwz   r5,0x24(r0)
59
60 #define EMERGENCY_PRINT_STORE_GPR6      l.sw    0x28(r0),r6
61 #define EMERGENCY_PRINT_LOAD_GPR6       l.lwz   r6,0x28(r0)
62
63 #define EMERGENCY_PRINT_STORE_GPR7      l.sw    0x2c(r0),r7
64 #define EMERGENCY_PRINT_LOAD_GPR7       l.lwz   r7,0x2c(r0)
65
66 #define EMERGENCY_PRINT_STORE_GPR8      l.sw    0x30(r0),r8
67 #define EMERGENCY_PRINT_LOAD_GPR8       l.lwz   r8,0x30(r0)
68
69 #define EMERGENCY_PRINT_STORE_GPR9      l.sw    0x34(r0),r9
70 #define EMERGENCY_PRINT_LOAD_GPR9       l.lwz   r9,0x34(r0)
71
72
73 /*
74  * TLB miss handlers temorary stores
75  */
76 #define EXCEPTION_STORE_GPR9            l.sw    0x10(r0),r9
77 #define EXCEPTION_LOAD_GPR9             l.lwz   r9,0x10(r0)
78
79 #define EXCEPTION_STORE_GPR2            l.sw    0x64(r0),r2
80 #define EXCEPTION_LOAD_GPR2             l.lwz   r2,0x64(r0)
81
82 #define EXCEPTION_STORE_GPR3            l.sw    0x68(r0),r3
83 #define EXCEPTION_LOAD_GPR3             l.lwz   r3,0x68(r0)
84
85 #define EXCEPTION_STORE_GPR4            l.sw    0x6c(r0),r4
86 #define EXCEPTION_LOAD_GPR4             l.lwz   r4,0x6c(r0)
87
88 #define EXCEPTION_STORE_GPR5            l.sw    0x70(r0),r5
89 #define EXCEPTION_LOAD_GPR5             l.lwz   r5,0x70(r0)
90
91 #define EXCEPTION_STORE_GPR6            l.sw    0x74(r0),r6
92 #define EXCEPTION_LOAD_GPR6             l.lwz   r6,0x74(r0)
93
94
95 /*
96  * EXCEPTION_HANDLE temporary stores
97  */
98
99 #define EXCEPTION_T_STORE_GPR30         l.sw    0x78(r0),r30
100 #define EXCEPTION_T_LOAD_GPR30(reg)     l.lwz   reg,0x78(r0)
101
102 #define EXCEPTION_T_STORE_GPR10         l.sw    0x7c(r0),r10
103 #define EXCEPTION_T_LOAD_GPR10(reg)     l.lwz   reg,0x7c(r0)
104
105 #define EXCEPTION_T_STORE_SP            l.sw    0x80(r0),r1
106 #define EXCEPTION_T_LOAD_SP(reg)        l.lwz   reg,0x80(r0)
107
108 /*
109  * For UNHANLDED_EXCEPTION
110  */
111
112 #define EXCEPTION_T_STORE_GPR31         l.sw    0x84(r0),r31
113 #define EXCEPTION_T_LOAD_GPR31(reg)     l.lwz   reg,0x84(r0)
114
115 /* =========================================================[ macros ]=== */
116
117
118 #define GET_CURRENT_PGD(reg,t1)                                 \
119         LOAD_SYMBOL_2_GPR(reg,current_pgd)                      ;\
120         tophys  (t1,reg)                                        ;\
121         l.lwz   reg,0(t1)
122
123
124 /*
125  * DSCR: this is a common hook for handling exceptions. it will save
126  *       the needed registers, set up stack and pointer to current
127  *       then jump to the handler while enabling MMU
128  *
129  * PRMS: handler        - a function to jump to. it has to save the
130  *                      remaining registers to kernel stack, call
131  *                      appropriate arch-independant exception handler
132  *                      and finaly jump to ret_from_except
133  *
134  * PREQ: unchanged state from the time exception happened
135  *
136  * POST: SAVED the following registers original value
137  *             to the new created exception frame pointed to by r1
138  *
139  *       r1  - ksp      pointing to the new (exception) frame
140  *       r4  - EEAR     exception EA
141  *       r10 - current  pointing to current_thread_info struct
142  *       r12 - syscall  0, since we didn't come from syscall
143  *       r13 - temp     it actually contains new SR, not needed anymore
144  *       r31 - handler  address of the handler we'll jump to
145  *
146  *       handler has to save remaining registers to the exception
147  *       ksp frame *before* tainting them!
148  *
149  * NOTE: this function is not reentrant per se. reentrancy is guaranteed
150  *       by processor disabling all exceptions/interrupts when exception
151  *       accours.
152  *
153  * OPTM: no need to make it so wasteful to extract ksp when in user mode
154  */
155
156 #define EXCEPTION_HANDLE(handler)                               \
157         EXCEPTION_T_STORE_GPR30                                 ;\
158         l.mfspr r30,r0,SPR_ESR_BASE                             ;\
159         l.andi  r30,r30,SPR_SR_SM                               ;\
160         l.sfeqi r30,0                                           ;\
161         EXCEPTION_T_STORE_GPR10                                 ;\
162         l.bnf   2f                            /* kernel_mode */ ;\
163          EXCEPTION_T_STORE_SP                 /* delay slot */  ;\
164 1: /* user_mode:   */                                           ;\
165         LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)           ;\
166         tophys  (r30,r1)                                        ;\
167         /* r10: current_thread_info  */                         ;\
168         l.lwz   r10,0(r30)                                      ;\
169         tophys  (r30,r10)                                       ;\
170         l.lwz   r1,(TI_KSP)(r30)                                ;\
171         /* fall through */                                      ;\
172 2: /* kernel_mode: */                                           ;\
173         /* create new stack frame, save only needed gprs */     ;\
174         /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */   ;\
175         /* r12: temp, syscall indicator */                      ;\
176         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
177         /* r1 is KSP, r30 is __pa(KSP) */                       ;\
178         tophys  (r30,r1)                                        ;\
179         l.sw    PT_GPR12(r30),r12                               ;\
180         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
181         l.sw    PT_PC(r30),r12                                  ;\
182         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
183         l.sw    PT_SR(r30),r12                                  ;\
184         /* save r30 */                                          ;\
185         EXCEPTION_T_LOAD_GPR30(r12)                             ;\
186         l.sw    PT_GPR30(r30),r12                               ;\
187         /* save r10 as was prior to exception */                ;\
188         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
189         l.sw    PT_GPR10(r30),r12                               ;\
190         /* save PT_SP as was prior to exception */              ;\
191         EXCEPTION_T_LOAD_SP(r12)                                ;\
192         l.sw    PT_SP(r30),r12                                  ;\
193         /* save exception r4, set r4 = EA */                    ;\
194         l.sw    PT_GPR4(r30),r4                                 ;\
195         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
196         /* r12 == 1 if we come from syscall */                  ;\
197         CLEAR_GPR(r12)                                          ;\
198         /* ----- turn on MMU ----- */                           ;\
199         l.ori   r30,r0,(EXCEPTION_SR)                           ;\
200         l.mtspr r0,r30,SPR_ESR_BASE                             ;\
201         /* r30: EA address of handler */                        ;\
202         LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
203         l.mtspr r0,r30,SPR_EPCR_BASE                            ;\
204         l.rfe
205
206 /*
207  * this doesn't work
208  *
209  *
210  * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
211  * #define UNHANDLED_EXCEPTION(handler)                         \
212  *      l.ori   r3,r0,0x1                                       ;\
213  *      l.mtspr r0,r3,SPR_SR                                    ;\
214  *      l.movhi r3,hi(0xf0000100)                               ;\
215  *      l.ori   r3,r3,lo(0xf0000100)                            ;\
216  *      l.jr    r3                                              ;\
217  *      l.nop   1
218  *
219  * #endif
220  */
221
222 /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
223  *       a bit more carefull (if we have a PT_SP or current pointer
224  *       corruption) and set them up from 'current_set'
225  *
226  */
227 #define UNHANDLED_EXCEPTION(handler)                            \
228         EXCEPTION_T_STORE_GPR31                                 ;\
229         EXCEPTION_T_STORE_GPR10                                 ;\
230         EXCEPTION_T_STORE_SP                                    ;\
231         /* temporary store r3, r9 into r1, r10 */               ;\
232         l.addi  r1,r3,0x0                                       ;\
233         l.addi  r10,r9,0x0                                      ;\
234         /* the string referenced by r3 must be low enough */    ;\
235         l.jal   _emergency_print                                ;\
236         l.ori   r3,r0,lo(_string_unhandled_exception)           ;\
237         l.mfspr r3,r0,SPR_NPC                                   ;\
238         l.jal   _emergency_print_nr                             ;\
239         l.andi  r3,r3,0x1f00                                    ;\
240         /* the string referenced by r3 must be low enough */    ;\
241         l.jal   _emergency_print                                ;\
242         l.ori   r3,r0,lo(_string_epc_prefix)                    ;\
243         l.jal   _emergency_print_nr                             ;\
244         l.mfspr r3,r0,SPR_EPCR_BASE                             ;\
245         l.jal   _emergency_print                                ;\
246         l.ori   r3,r0,lo(_string_nl)                            ;\
247         /* end of printing */                                   ;\
248         l.addi  r3,r1,0x0                                       ;\
249         l.addi  r9,r10,0x0                                      ;\
250         /* extract current, ksp from current_set */             ;\
251         LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)              ;\
252         LOAD_SYMBOL_2_GPR(r10,init_thread_union)                ;\
253         /* create new stack frame, save only needed gprs */     ;\
254         /* r1: KSP, r10: current, r31: __pa(KSP) */             ;\
255         /* r12: temp, syscall indicator, r13 temp */            ;\
256         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
257         /* r1 is KSP, r31 is __pa(KSP) */                       ;\
258         tophys  (r31,r1)                                        ;\
259         l.sw    PT_GPR12(r31),r12                                       ;\
260         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
261         l.sw    PT_PC(r31),r12                                  ;\
262         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
263         l.sw    PT_SR(r31),r12                                  ;\
264         /* save r31 */                                          ;\
265         EXCEPTION_T_LOAD_GPR31(r12)                             ;\
266         l.sw    PT_GPR31(r31),r12                                       ;\
267         /* save r10 as was prior to exception */                ;\
268         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
269         l.sw    PT_GPR10(r31),r12                                       ;\
270         /* save PT_SP as was prior to exception */                      ;\
271         EXCEPTION_T_LOAD_SP(r12)                                ;\
272         l.sw    PT_SP(r31),r12                                  ;\
273         l.sw    PT_GPR13(r31),r13                                       ;\
274         /* --> */                                               ;\
275         /* save exception r4, set r4 = EA */                    ;\
276         l.sw    PT_GPR4(r31),r4                                 ;\
277         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
278         /* r12 == 1 if we come from syscall */                  ;\
279         CLEAR_GPR(r12)                                          ;\
280         /* ----- play a MMU trick ----- */                      ;\
281         l.ori   r31,r0,(EXCEPTION_SR)                           ;\
282         l.mtspr r0,r31,SPR_ESR_BASE                             ;\
283         /* r31: EA address of handler */                        ;\
284         LOAD_SYMBOL_2_GPR(r31,handler)                          ;\
285         l.mtspr r0,r31,SPR_EPCR_BASE                            ;\
286         l.rfe
287
288 /* =====================================================[ exceptions] === */
289
290 /* ---[ 0x100: RESET exception ]----------------------------------------- */
291     .org 0x100
292         /* Jump to .init code at _start which lives in the .head section
293          * and will be discarded after boot.
294          */
295         LOAD_SYMBOL_2_GPR(r15, _start)
296         tophys  (r13,r15)                       /* MMU disabled */
297         l.jr    r13
298          l.nop
299
300 /* ---[ 0x200: BUS exception ]------------------------------------------- */
301     .org 0x200
302 _dispatch_bus_fault:
303         EXCEPTION_HANDLE(_bus_fault_handler)
304
305 /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
306     .org 0x300
307 _dispatch_do_dpage_fault:
308 //      totaly disable timer interrupt
309 //      l.mtspr r0,r0,SPR_TTMR
310 //      DEBUG_TLB_PROBE(0x300)
311 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
312         EXCEPTION_HANDLE(_data_page_fault_handler)
313
314 /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
315     .org 0x400
316 _dispatch_do_ipage_fault:
317 //      totaly disable timer interrupt
318 //      l.mtspr r0,r0,SPR_TTMR
319 //      DEBUG_TLB_PROBE(0x400)
320 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
321         EXCEPTION_HANDLE(_insn_page_fault_handler)
322
323 /* ---[ 0x500: Timer exception ]----------------------------------------- */
324     .org 0x500
325         EXCEPTION_HANDLE(_timer_handler)
326
327 /* ---[ 0x600: Aligment exception ]-------------------------------------- */
328     .org 0x600
329         EXCEPTION_HANDLE(_alignment_handler)
330
331 /* ---[ 0x700: Illegal insn exception ]---------------------------------- */
332     .org 0x700
333         EXCEPTION_HANDLE(_illegal_instruction_handler)
334
335 /* ---[ 0x800: External interrupt exception ]---------------------------- */
336     .org 0x800
337         EXCEPTION_HANDLE(_external_irq_handler)
338
339 /* ---[ 0x900: DTLB miss exception ]------------------------------------- */
340     .org 0x900
341         l.j     boot_dtlb_miss_handler
342         l.nop
343
344 /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
345     .org 0xa00
346         l.j     boot_itlb_miss_handler
347         l.nop
348
349 /* ---[ 0xb00: Range exception ]----------------------------------------- */
350     .org 0xb00
351         UNHANDLED_EXCEPTION(_vector_0xb00)
352
353 /* ---[ 0xc00: Syscall exception ]--------------------------------------- */
354     .org 0xc00
355         EXCEPTION_HANDLE(_sys_call_handler)
356
357 /* ---[ 0xd00: Trap exception ]------------------------------------------ */
358     .org 0xd00
359         UNHANDLED_EXCEPTION(_vector_0xd00)
360
361 /* ---[ 0xe00: Trap exception ]------------------------------------------ */
362     .org 0xe00
363 //      UNHANDLED_EXCEPTION(_vector_0xe00)
364         EXCEPTION_HANDLE(_trap_handler)
365
366 /* ---[ 0xf00: Reserved exception ]-------------------------------------- */
367     .org 0xf00
368         UNHANDLED_EXCEPTION(_vector_0xf00)
369
370 /* ---[ 0x1000: Reserved exception ]------------------------------------- */
371     .org 0x1000
372         UNHANDLED_EXCEPTION(_vector_0x1000)
373
374 /* ---[ 0x1100: Reserved exception ]------------------------------------- */
375     .org 0x1100
376         UNHANDLED_EXCEPTION(_vector_0x1100)
377
378 /* ---[ 0x1200: Reserved exception ]------------------------------------- */
379     .org 0x1200
380         UNHANDLED_EXCEPTION(_vector_0x1200)
381
382 /* ---[ 0x1300: Reserved exception ]------------------------------------- */
383     .org 0x1300
384         UNHANDLED_EXCEPTION(_vector_0x1300)
385
386 /* ---[ 0x1400: Reserved exception ]------------------------------------- */
387     .org 0x1400
388         UNHANDLED_EXCEPTION(_vector_0x1400)
389
390 /* ---[ 0x1500: Reserved exception ]------------------------------------- */
391     .org 0x1500
392         UNHANDLED_EXCEPTION(_vector_0x1500)
393
394 /* ---[ 0x1600: Reserved exception ]------------------------------------- */
395     .org 0x1600
396         UNHANDLED_EXCEPTION(_vector_0x1600)
397
398 /* ---[ 0x1700: Reserved exception ]------------------------------------- */
399     .org 0x1700
400         UNHANDLED_EXCEPTION(_vector_0x1700)
401
402 /* ---[ 0x1800: Reserved exception ]------------------------------------- */
403     .org 0x1800
404         UNHANDLED_EXCEPTION(_vector_0x1800)
405
406 /* ---[ 0x1900: Reserved exception ]------------------------------------- */
407     .org 0x1900
408         UNHANDLED_EXCEPTION(_vector_0x1900)
409
410 /* ---[ 0x1a00: Reserved exception ]------------------------------------- */
411     .org 0x1a00
412         UNHANDLED_EXCEPTION(_vector_0x1a00)
413
414 /* ---[ 0x1b00: Reserved exception ]------------------------------------- */
415     .org 0x1b00
416         UNHANDLED_EXCEPTION(_vector_0x1b00)
417
418 /* ---[ 0x1c00: Reserved exception ]------------------------------------- */
419     .org 0x1c00
420         UNHANDLED_EXCEPTION(_vector_0x1c00)
421
422 /* ---[ 0x1d00: Reserved exception ]------------------------------------- */
423     .org 0x1d00
424         UNHANDLED_EXCEPTION(_vector_0x1d00)
425
426 /* ---[ 0x1e00: Reserved exception ]------------------------------------- */
427     .org 0x1e00
428         UNHANDLED_EXCEPTION(_vector_0x1e00)
429
430 /* ---[ 0x1f00: Reserved exception ]------------------------------------- */
431     .org 0x1f00
432         UNHANDLED_EXCEPTION(_vector_0x1f00)
433
434     .org 0x2000
435 /* ===================================================[ kernel start ]=== */
436
437 /*    .text*/
438
439 /* This early stuff belongs in HEAD, but some of the functions below definitely
440  * don't... */
441
442         __HEAD
443         .global _start
444 _start:
445         /* save kernel parameters */
446         l.or    r25,r0,r3       /* pointer to fdt */
447
448         /*
449          * ensure a deterministic start
450          */
451
452         l.ori   r3,r0,0x1
453         l.mtspr r0,r3,SPR_SR
454
455         /*
456          * Start the TTCR as early as possible, so that the RNG can make use of
457          * measurements of boot time from the earliest opportunity. Especially
458          * important is that the TTCR does not return zero by the time we reach
459          * rand_initialize().
460          */
461         l.movhi r3,hi(SPR_TTMR_CR)
462         l.mtspr r0,r3,SPR_TTMR
463
464         CLEAR_GPR(r1)
465         CLEAR_GPR(r2)
466         CLEAR_GPR(r3)
467         CLEAR_GPR(r4)
468         CLEAR_GPR(r5)
469         CLEAR_GPR(r6)
470         CLEAR_GPR(r7)
471         CLEAR_GPR(r8)
472         CLEAR_GPR(r9)
473         CLEAR_GPR(r10)
474         CLEAR_GPR(r11)
475         CLEAR_GPR(r12)
476         CLEAR_GPR(r13)
477         CLEAR_GPR(r14)
478         CLEAR_GPR(r15)
479         CLEAR_GPR(r16)
480         CLEAR_GPR(r17)
481         CLEAR_GPR(r18)
482         CLEAR_GPR(r19)
483         CLEAR_GPR(r20)
484         CLEAR_GPR(r21)
485         CLEAR_GPR(r22)
486         CLEAR_GPR(r23)
487         CLEAR_GPR(r24)
488         CLEAR_GPR(r26)
489         CLEAR_GPR(r27)
490         CLEAR_GPR(r28)
491         CLEAR_GPR(r29)
492         CLEAR_GPR(r30)
493         CLEAR_GPR(r31)
494
495         /*
496          * set up initial ksp and current
497          */
498         LOAD_SYMBOL_2_GPR(r1,init_thread_union+0x2000)  // setup kernel stack
499         LOAD_SYMBOL_2_GPR(r10,init_thread_union)        // setup current
500         tophys  (r31,r10)
501         l.sw    TI_KSP(r31), r1
502
503         l.ori   r4,r0,0x0
504
505
506         /*
507          * .data contains initialized data,
508          * .bss contains uninitialized data - clear it up
509          */
510 clear_bss:
511         LOAD_SYMBOL_2_GPR(r24, __bss_start)
512         LOAD_SYMBOL_2_GPR(r26, _end)
513         tophys(r28,r24)
514         tophys(r30,r26)
515         CLEAR_GPR(r24)
516         CLEAR_GPR(r26)
517 1:
518         l.sw    (0)(r28),r0
519         l.sfltu r28,r30
520         l.bf    1b
521         l.addi  r28,r28,4
522
523 enable_ic:
524         l.jal   _ic_enable
525          l.nop
526
527 enable_dc:
528         l.jal   _dc_enable
529          l.nop
530
531 flush_tlb:
532         /*
533          *  I N V A L I D A T E   T L B   e n t r i e s
534          */
535         LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
536         LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
537         l.addi  r7,r0,128 /* Maximum number of sets */
538 1:
539         l.mtspr r5,r0,0x0
540         l.mtspr r6,r0,0x0
541
542         l.addi  r5,r5,1
543         l.addi  r6,r6,1
544         l.sfeq  r7,r0
545         l.bnf   1b
546          l.addi r7,r7,-1
547
548
549 /* The MMU needs to be enabled before or32_early_setup is called */
550
551 enable_mmu:
552         /*
553          * enable dmmu & immu
554          * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0
555          */
556         l.mfspr r30,r0,SPR_SR
557         l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
558         l.ori   r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
559         l.or    r30,r30,r28
560         l.mtspr r0,r30,SPR_SR
561         l.nop
562         l.nop
563         l.nop
564         l.nop
565         l.nop
566         l.nop
567         l.nop
568         l.nop
569         l.nop
570         l.nop
571         l.nop
572         l.nop
573         l.nop
574         l.nop
575         l.nop
576         l.nop
577
578         // reset the simulation counters
579         l.nop 5
580
581         /* check fdt header magic word */
582         l.lwz   r3,0(r25)       /* load magic from fdt into r3 */
583         l.movhi r4,hi(OF_DT_HEADER)
584         l.ori   r4,r4,lo(OF_DT_HEADER)
585         l.sfeq  r3,r4
586         l.bf    _fdt_found
587          l.nop
588         /* magic number mismatch, set fdt pointer to null */
589         l.or    r25,r0,r0
590 _fdt_found:
591         /* pass fdt pointer to or32_early_setup in r3 */
592         l.or    r3,r0,r25
593         LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
594         l.jalr r24
595          l.nop
596
597 clear_regs:
598         /*
599          * clear all GPRS to increase determinism
600          */
601         CLEAR_GPR(r2)
602         CLEAR_GPR(r3)
603         CLEAR_GPR(r4)
604         CLEAR_GPR(r5)
605         CLEAR_GPR(r6)
606         CLEAR_GPR(r7)
607         CLEAR_GPR(r8)
608         CLEAR_GPR(r9)
609         CLEAR_GPR(r11)
610         CLEAR_GPR(r12)
611         CLEAR_GPR(r13)
612         CLEAR_GPR(r14)
613         CLEAR_GPR(r15)
614         CLEAR_GPR(r16)
615         CLEAR_GPR(r17)
616         CLEAR_GPR(r18)
617         CLEAR_GPR(r19)
618         CLEAR_GPR(r20)
619         CLEAR_GPR(r21)
620         CLEAR_GPR(r22)
621         CLEAR_GPR(r23)
622         CLEAR_GPR(r24)
623         CLEAR_GPR(r25)
624         CLEAR_GPR(r26)
625         CLEAR_GPR(r27)
626         CLEAR_GPR(r28)
627         CLEAR_GPR(r29)
628         CLEAR_GPR(r30)
629         CLEAR_GPR(r31)
630
631 jump_start_kernel:
632         /*
633          * jump to kernel entry (start_kernel)
634          */
635         LOAD_SYMBOL_2_GPR(r30, start_kernel)
636         l.jr    r30
637          l.nop
638
639 /* ========================================[ cache ]=== */
640
641         /* aligment here so we don't change memory offsets with
642          * memory controler defined
643          */
644         .align 0x2000
645
646 _ic_enable:
647         /* Check if IC present and skip enabling otherwise */
648         l.mfspr r24,r0,SPR_UPR
649         l.andi  r26,r24,SPR_UPR_ICP
650         l.sfeq  r26,r0
651         l.bf    9f
652         l.nop
653
654         /* Disable IC */
655         l.mfspr r6,r0,SPR_SR
656         l.addi  r5,r0,-1
657         l.xori  r5,r5,SPR_SR_ICE
658         l.and   r5,r6,r5
659         l.mtspr r0,r5,SPR_SR
660
661         /* Establish cache block size
662            If BS=0, 16;
663            If BS=1, 32;
664            r14 contain block size
665         */
666         l.mfspr r24,r0,SPR_ICCFGR
667         l.andi  r26,r24,SPR_ICCFGR_CBS
668         l.srli  r28,r26,7
669         l.ori   r30,r0,16
670         l.sll   r14,r30,r28
671
672         /* Establish number of cache sets
673            r16 contains number of cache sets
674            r28 contains log(# of cache sets)
675         */
676         l.andi  r26,r24,SPR_ICCFGR_NCS
677         l.srli  r28,r26,3
678         l.ori   r30,r0,1
679         l.sll   r16,r30,r28
680
681         /* Invalidate IC */
682         l.addi  r6,r0,0
683         l.sll   r5,r14,r28
684 //        l.mul   r5,r14,r16
685 //      l.trap  1
686 //      l.addi  r5,r0,IC_SIZE
687 1:
688         l.mtspr r0,r6,SPR_ICBIR
689         l.sfne  r6,r5
690         l.bf    1b
691         l.add   r6,r6,r14
692  //       l.addi   r6,r6,IC_LINE
693
694         /* Enable IC */
695         l.mfspr r6,r0,SPR_SR
696         l.ori   r6,r6,SPR_SR_ICE
697         l.mtspr r0,r6,SPR_SR
698         l.nop
699         l.nop
700         l.nop
701         l.nop
702         l.nop
703         l.nop
704         l.nop
705         l.nop
706         l.nop
707         l.nop
708 9:
709         l.jr    r9
710         l.nop
711
712 _dc_enable:
713         /* Check if DC present and skip enabling otherwise */
714         l.mfspr r24,r0,SPR_UPR
715         l.andi  r26,r24,SPR_UPR_DCP
716         l.sfeq  r26,r0
717         l.bf    9f
718         l.nop
719
720         /* Disable DC */
721         l.mfspr r6,r0,SPR_SR
722         l.addi  r5,r0,-1
723         l.xori  r5,r5,SPR_SR_DCE
724         l.and   r5,r6,r5
725         l.mtspr r0,r5,SPR_SR
726
727         /* Establish cache block size
728            If BS=0, 16;
729            If BS=1, 32;
730            r14 contain block size
731         */
732         l.mfspr r24,r0,SPR_DCCFGR
733         l.andi  r26,r24,SPR_DCCFGR_CBS
734         l.srli  r28,r26,7
735         l.ori   r30,r0,16
736         l.sll   r14,r30,r28
737
738         /* Establish number of cache sets
739            r16 contains number of cache sets
740            r28 contains log(# of cache sets)
741         */
742         l.andi  r26,r24,SPR_DCCFGR_NCS
743         l.srli  r28,r26,3
744         l.ori   r30,r0,1
745         l.sll   r16,r30,r28
746
747         /* Invalidate DC */
748         l.addi  r6,r0,0
749         l.sll   r5,r14,r28
750 1:
751         l.mtspr r0,r6,SPR_DCBIR
752         l.sfne  r6,r5
753         l.bf    1b
754         l.add   r6,r6,r14
755
756         /* Enable DC */
757         l.mfspr r6,r0,SPR_SR
758         l.ori   r6,r6,SPR_SR_DCE
759         l.mtspr r0,r6,SPR_SR
760 9:
761         l.jr    r9
762         l.nop
763
764 /* ===============================================[ page table masks ]=== */
765
766 #define DTLB_UP_CONVERT_MASK  0x3fa
767 #define ITLB_UP_CONVERT_MASK  0x3a
768
769 /* for SMP we'd have (this is a bit subtle, CC must be always set
770  * for SMP, but since we have _PAGE_PRESENT bit always defined
771  * we can just modify the mask)
772  */
773 #define DTLB_SMP_CONVERT_MASK  0x3fb
774 #define ITLB_SMP_CONVERT_MASK  0x3b
775
776 /* ---[ boot dtlb miss handler ]----------------------------------------- */
777
778 boot_dtlb_miss_handler:
779
780 /* mask for DTLB_MR register: - (0) sets V (valid) bit,
781  *                            - (31-12) sets bits belonging to VPN (31-12)
782  */
783 #define DTLB_MR_MASK 0xfffff001
784
785 /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
786  *                            - (4) sets A (access) bit,
787  *                            - (5) sets D (dirty) bit,
788  *                            - (8) sets SRE (superuser read) bit
789  *                            - (9) sets SWE (superuser write) bit
790  *                            - (31-12) sets bits belonging to VPN (31-12)
791  */
792 #define DTLB_TR_MASK 0xfffff332
793
794 /* These are for masking out the VPN/PPN value from the MR/TR registers...
795  * it's not the same as the PFN */
796 #define VPN_MASK 0xfffff000
797 #define PPN_MASK 0xfffff000
798
799
800         EXCEPTION_STORE_GPR6
801
802 #if 0
803         l.mfspr r6,r0,SPR_ESR_BASE         //
804         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
805         l.sfeqi r6,0                       // r6 == 0x1 --> SM
806         l.bf    exit_with_no_dtranslation  //
807         l.nop
808 #endif
809
810         /* this could be optimized by moving storing of
811          * non r6 registers here, and jumping r6 restore
812          * if not in supervisor mode
813          */
814
815         EXCEPTION_STORE_GPR2
816         EXCEPTION_STORE_GPR3
817         EXCEPTION_STORE_GPR4
818         EXCEPTION_STORE_GPR5
819
820         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
821
822 immediate_translation:
823         CLEAR_GPR(r6)
824
825         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
826
827         l.mfspr r6, r0, SPR_DMMUCFGR
828         l.andi  r6, r6, SPR_DMMUCFGR_NTS
829         l.srli  r6, r6, SPR_DMMUCFGR_NTS_OFF
830         l.ori   r5, r0, 0x1
831         l.sll   r5, r5, r6      // r5 = number DMMU sets
832         l.addi  r6, r5, -1      // r6 = nsets mask
833         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
834
835         l.or    r6,r6,r4                   // r6 <- r4
836         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
837         l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
838         l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
839         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
840         l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR
841
842         /* set up DTLB with no translation for EA <= 0xbfffffff */
843         LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
844         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
845         l.bf     1f                        // goto out
846         l.and    r3,r4,r4                  // delay slot :: 24 <- r4 (if flag==1)
847
848         tophys(r3,r4)                      // r3 <- PA
849 1:
850         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
851         l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
852         l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
853         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
854         l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR
855
856         EXCEPTION_LOAD_GPR6
857         EXCEPTION_LOAD_GPR5
858         EXCEPTION_LOAD_GPR4
859         EXCEPTION_LOAD_GPR3
860         EXCEPTION_LOAD_GPR2
861
862         l.rfe                              // SR <- ESR, PC <- EPC
863
864 exit_with_no_dtranslation:
865         /* EA out of memory or not in supervisor mode */
866         EXCEPTION_LOAD_GPR6
867         EXCEPTION_LOAD_GPR4
868         l.j     _dispatch_bus_fault
869
870 /* ---[ boot itlb miss handler ]----------------------------------------- */
871
872 boot_itlb_miss_handler:
873
874 /* mask for ITLB_MR register: - sets V (valid) bit,
875  *                            - sets bits belonging to VPN (15-12)
876  */
877 #define ITLB_MR_MASK 0xfffff001
878
879 /* mask for ITLB_TR register: - sets A (access) bit,
880  *                            - sets SXE (superuser execute) bit
881  *                            - sets bits belonging to VPN (15-12)
882  */
883 #define ITLB_TR_MASK 0xfffff050
884
885 /*
886 #define VPN_MASK 0xffffe000
887 #define PPN_MASK 0xffffe000
888 */
889
890
891
892         EXCEPTION_STORE_GPR2
893         EXCEPTION_STORE_GPR3
894         EXCEPTION_STORE_GPR4
895         EXCEPTION_STORE_GPR5
896         EXCEPTION_STORE_GPR6
897
898 #if 0
899         l.mfspr r6,r0,SPR_ESR_BASE         //
900         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
901         l.sfeqi r6,0                       // r6 == 0x1 --> SM
902         l.bf    exit_with_no_itranslation
903         l.nop
904 #endif
905
906
907         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
908
909 earlyearly:
910         CLEAR_GPR(r6)
911
912         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
913
914         l.mfspr r6, r0, SPR_IMMUCFGR
915         l.andi  r6, r6, SPR_IMMUCFGR_NTS
916         l.srli  r6, r6, SPR_IMMUCFGR_NTS_OFF
917         l.ori   r5, r0, 0x1
918         l.sll   r5, r5, r6      // r5 = number IMMU sets from IMMUCFGR
919         l.addi  r6, r5, -1      // r6 = nsets mask
920         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
921
922         l.or    r6,r6,r4                   // r6 <- r4
923         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
924         l.movhi r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
925         l.ori   r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
926         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
927         l.mtspr r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR
928
929         /*
930          * set up ITLB with no translation for EA <= 0x0fffffff
931          *
932          * we need this for head.S mapping (EA = PA). if we move all functions
933          * which run with mmu enabled into entry.S, we might be able to eliminate this.
934          *
935          */
936         LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
937         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xb0ffffff >= EA)
938         l.bf     1f                        // goto out
939         l.and    r3,r4,r4                  // delay slot :: 24 <- r4 (if flag==1)
940
941         tophys(r3,r4)                      // r3 <- PA
942 1:
943         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
944         l.movhi r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
945         l.ori   r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
946         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
947         l.mtspr r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR
948
949         EXCEPTION_LOAD_GPR6
950         EXCEPTION_LOAD_GPR5
951         EXCEPTION_LOAD_GPR4
952         EXCEPTION_LOAD_GPR3
953         EXCEPTION_LOAD_GPR2
954
955         l.rfe                              // SR <- ESR, PC <- EPC
956
957 exit_with_no_itranslation:
958         EXCEPTION_LOAD_GPR4
959         EXCEPTION_LOAD_GPR6
960         l.j    _dispatch_bus_fault
961         l.nop
962
963 /* ====================================================================== */
964 /*
965  * Stuff below here shouldn't go into .head section... maybe this stuff
966  * can be moved to entry.S ???
967  */
968
969 /* ==============================================[ DTLB miss handler ]=== */
970
971 /*
972  * Comments:
973  *   Exception handlers are entered with MMU off so the following handler
974  *   needs to use physical addressing
975  *
976  */
977
978         .text
979 ENTRY(dtlb_miss_handler)
980         EXCEPTION_STORE_GPR2
981         EXCEPTION_STORE_GPR3
982         EXCEPTION_STORE_GPR4
983         EXCEPTION_STORE_GPR5
984         EXCEPTION_STORE_GPR6
985         /*
986          * get EA of the miss
987          */
988         l.mfspr r2,r0,SPR_EEAR_BASE
989         /*
990          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
991          */
992         GET_CURRENT_PGD(r3,r5)          // r3 is current_pgd, r5 is temp
993         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
994         l.slli  r4,r4,0x2               // to get address << 2
995         l.add   r5,r4,r3                // r4 is pgd_index(daddr)
996         /*
997          * if (pmd_none(*pmd))
998          *   goto pmd_none:
999          */
1000         tophys  (r4,r5)
1001         l.lwz   r3,0x0(r4)              // get *pmd value
1002         l.sfne  r3,r0
1003         l.bnf   d_pmd_none
1004          l.andi r3,r3,~PAGE_MASK //0x1fff               // ~PAGE_MASK
1005         /*
1006          * if (pmd_bad(*pmd))
1007          *   pmd_clear(pmd)
1008          *   goto pmd_bad:
1009          */
1010 //      l.sfeq  r3,r0                   // check *pmd value
1011 //      l.bf    d_pmd_good
1012         l.addi  r3,r0,0xffffe000        // PAGE_MASK
1013 //      l.j     d_pmd_bad
1014 //      l.sw    0x0(r4),r0              // clear pmd
1015 d_pmd_good:
1016         /*
1017          * pte = *pte_offset(pmd, daddr);
1018          */
1019         l.lwz   r4,0x0(r4)              // get **pmd value
1020         l.and   r4,r4,r3                // & PAGE_MASK
1021         l.srli  r5,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1022         l.andi  r3,r5,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1023         l.slli  r3,r3,0x2               // to get address << 2
1024         l.add   r3,r3,r4
1025         l.lwz   r2,0x0(r3)              // this is pte at last
1026         /*
1027          * if (!pte_present(pte))
1028          */
1029         l.andi  r4,r2,0x1
1030         l.sfne  r4,r0                   // is pte present
1031         l.bnf   d_pte_not_present
1032         l.addi  r3,r0,0xffffe3fa        // PAGE_MASK | DTLB_UP_CONVERT_MASK
1033         /*
1034          * fill DTLB TR register
1035          */
1036         l.and   r4,r2,r3                // apply the mask
1037         // Determine number of DMMU sets
1038         l.mfspr r6, r0, SPR_DMMUCFGR
1039         l.andi  r6, r6, SPR_DMMUCFGR_NTS
1040         l.srli  r6, r6, SPR_DMMUCFGR_NTS_OFF
1041         l.ori   r3, r0, 0x1
1042         l.sll   r3, r3, r6      // r3 = number DMMU sets DMMUCFGR
1043         l.addi  r6, r3, -1      // r6 = nsets mask
1044         l.and   r5, r5, r6      // calc offset:  & (NUM_TLB_ENTRIES-1)
1045                                                            //NUM_TLB_ENTRIES
1046         l.mtspr r5,r4,SPR_DTLBTR_BASE(0)
1047         /*
1048          * fill DTLB MR register
1049          */
1050         l.mfspr r2,r0,SPR_EEAR_BASE
1051         l.addi  r3,r0,0xffffe000        // PAGE_MASK
1052         l.and   r4,r2,r3                // apply PAGE_MASK to EA (__PHX__ do we really need this?)
1053         l.ori   r4,r4,0x1               // set hardware valid bit: DTBL_MR entry
1054         l.mtspr r5,r4,SPR_DTLBMR_BASE(0)
1055
1056         EXCEPTION_LOAD_GPR2
1057         EXCEPTION_LOAD_GPR3
1058         EXCEPTION_LOAD_GPR4
1059         EXCEPTION_LOAD_GPR5
1060         EXCEPTION_LOAD_GPR6
1061         l.rfe
1062 d_pmd_bad:
1063         l.nop   1
1064         EXCEPTION_LOAD_GPR2
1065         EXCEPTION_LOAD_GPR3
1066         EXCEPTION_LOAD_GPR4
1067         EXCEPTION_LOAD_GPR5
1068         EXCEPTION_LOAD_GPR6
1069         l.rfe
1070 d_pmd_none:
1071 d_pte_not_present:
1072         EXCEPTION_LOAD_GPR2
1073         EXCEPTION_LOAD_GPR3
1074         EXCEPTION_LOAD_GPR4
1075         EXCEPTION_LOAD_GPR5
1076         EXCEPTION_LOAD_GPR6
1077         EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
1078
1079 /* ==============================================[ ITLB miss handler ]=== */
1080 ENTRY(itlb_miss_handler)
1081         EXCEPTION_STORE_GPR2
1082         EXCEPTION_STORE_GPR3
1083         EXCEPTION_STORE_GPR4
1084         EXCEPTION_STORE_GPR5
1085         EXCEPTION_STORE_GPR6
1086         /*
1087          * get EA of the miss
1088          */
1089         l.mfspr r2,r0,SPR_EEAR_BASE
1090
1091         /*
1092          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1093          *
1094          */
1095         GET_CURRENT_PGD(r3,r5)          // r3 is current_pgd, r5 is temp
1096         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1097         l.slli  r4,r4,0x2               // to get address << 2
1098         l.add   r5,r4,r3                // r4 is pgd_index(daddr)
1099         /*
1100          * if (pmd_none(*pmd))
1101          *   goto pmd_none:
1102          */
1103         tophys  (r4,r5)
1104         l.lwz   r3,0x0(r4)              // get *pmd value
1105         l.sfne  r3,r0
1106         l.bnf   i_pmd_none
1107         l.andi  r3,r3,0x1fff            // ~PAGE_MASK
1108         /*
1109          * if (pmd_bad(*pmd))
1110          *   pmd_clear(pmd)
1111          *   goto pmd_bad:
1112          */
1113
1114 //      l.sfeq  r3,r0                   // check *pmd value
1115 //      l.bf    i_pmd_good
1116         l.addi  r3,r0,0xffffe000        // PAGE_MASK
1117 //      l.j     i_pmd_bad
1118 //      l.sw    0x0(r4),r0              // clear pmd
1119
1120 i_pmd_good:
1121         /*
1122          * pte = *pte_offset(pmd, iaddr);
1123          *
1124          */
1125         l.lwz   r4,0x0(r4)              // get **pmd value
1126         l.and   r4,r4,r3                // & PAGE_MASK
1127         l.srli  r5,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1128         l.andi  r3,r5,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1129         l.slli  r3,r3,0x2               // to get address << 2
1130         l.add   r3,r3,r4
1131         l.lwz   r2,0x0(r3)              // this is pte at last
1132         /*
1133          * if (!pte_present(pte))
1134          *
1135          */
1136         l.andi  r4,r2,0x1
1137         l.sfne  r4,r0                   // is pte present
1138         l.bnf   i_pte_not_present
1139         l.addi  r3,r0,0xffffe03a        // PAGE_MASK | ITLB_UP_CONVERT_MASK
1140         /*
1141          * fill ITLB TR register
1142          */
1143         l.and   r4,r2,r3                // apply the mask
1144         l.andi  r3,r2,0x7c0             // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
1145 //      l.andi  r3,r2,0x400             // _PAGE_EXEC
1146         l.sfeq  r3,r0
1147         l.bf    itlb_tr_fill //_workaround
1148         // Determine number of IMMU sets
1149         l.mfspr r6, r0, SPR_IMMUCFGR
1150         l.andi  r6, r6, SPR_IMMUCFGR_NTS
1151         l.srli  r6, r6, SPR_IMMUCFGR_NTS_OFF
1152         l.ori   r3, r0, 0x1
1153         l.sll   r3, r3, r6      // r3 = number IMMU sets IMMUCFGR
1154         l.addi  r6, r3, -1      // r6 = nsets mask
1155         l.and   r5, r5, r6      // calc offset:  & (NUM_TLB_ENTRIES-1)
1156
1157 /*
1158  * __PHX__ :: fixme
1159  * we should not just blindly set executable flags,
1160  * but it does help with ping. the clean way would be to find out
1161  * (and fix it) why stack doesn't have execution permissions
1162  */
1163
1164 itlb_tr_fill_workaround:
1165         l.ori   r4,r4,0xc0              // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
1166 itlb_tr_fill:
1167         l.mtspr r5,r4,SPR_ITLBTR_BASE(0)
1168         /*
1169          * fill DTLB MR register
1170          */
1171         l.mfspr r2,r0,SPR_EEAR_BASE
1172         l.addi  r3,r0,0xffffe000        // PAGE_MASK
1173         l.and   r4,r2,r3                // apply PAGE_MASK to EA (__PHX__ do we really need this?)
1174         l.ori   r4,r4,0x1               // set hardware valid bit: DTBL_MR entry
1175         l.mtspr r5,r4,SPR_ITLBMR_BASE(0)
1176
1177         EXCEPTION_LOAD_GPR2
1178         EXCEPTION_LOAD_GPR3
1179         EXCEPTION_LOAD_GPR4
1180         EXCEPTION_LOAD_GPR5
1181         EXCEPTION_LOAD_GPR6
1182         l.rfe
1183
1184 i_pmd_bad:
1185         l.nop   1
1186         EXCEPTION_LOAD_GPR2
1187         EXCEPTION_LOAD_GPR3
1188         EXCEPTION_LOAD_GPR4
1189         EXCEPTION_LOAD_GPR5
1190         EXCEPTION_LOAD_GPR6
1191         l.rfe
1192 i_pmd_none:
1193 i_pte_not_present:
1194         EXCEPTION_LOAD_GPR2
1195         EXCEPTION_LOAD_GPR3
1196         EXCEPTION_LOAD_GPR4
1197         EXCEPTION_LOAD_GPR5
1198         EXCEPTION_LOAD_GPR6
1199         EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
1200
1201 /* ==============================================[ boot tlb handlers ]=== */
1202
1203
1204 /* =================================================[ debugging aids ]=== */
1205
1206         .align 64
1207 _immu_trampoline:
1208         .space 64
1209 _immu_trampoline_top:
1210
1211 #define TRAMP_SLOT_0            (0x0)
1212 #define TRAMP_SLOT_1            (0x4)
1213 #define TRAMP_SLOT_2            (0x8)
1214 #define TRAMP_SLOT_3            (0xc)
1215 #define TRAMP_SLOT_4            (0x10)
1216 #define TRAMP_SLOT_5            (0x14)
1217 #define TRAMP_FRAME_SIZE        (0x18)
1218
1219 ENTRY(_immu_trampoline_workaround)
1220         // r2 EEA
1221         // r6 is physical EEA
1222         tophys(r6,r2)
1223
1224         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1225         tophys  (r3,r5)                 // r3 is trampoline (physical)
1226
1227         LOAD_SYMBOL_2_GPR(r4,0x15000000)
1228         l.sw    TRAMP_SLOT_0(r3),r4
1229         l.sw    TRAMP_SLOT_1(r3),r4
1230         l.sw    TRAMP_SLOT_4(r3),r4
1231         l.sw    TRAMP_SLOT_5(r3),r4
1232
1233                                         // EPC = EEA - 0x4
1234         l.lwz   r4,0x0(r6)              // load op @ EEA + 0x0 (fc address)
1235         l.sw    TRAMP_SLOT_3(r3),r4     // store it to _immu_trampoline_data
1236         l.lwz   r4,-0x4(r6)             // load op @ EEA - 0x4 (f8 address)
1237         l.sw    TRAMP_SLOT_2(r3),r4     // store it to _immu_trampoline_data
1238
1239         l.srli  r5,r4,26                // check opcode for write access
1240         l.sfeqi r5,0                    // l.j
1241         l.bf    0f
1242         l.sfeqi r5,0x11                 // l.jr
1243         l.bf    1f
1244         l.sfeqi r5,1                    // l.jal
1245         l.bf    2f
1246         l.sfeqi r5,0x12                 // l.jalr
1247         l.bf    3f
1248         l.sfeqi r5,3                    // l.bnf
1249         l.bf    4f
1250         l.sfeqi r5,4                    // l.bf
1251         l.bf    5f
1252 99:
1253         l.nop
1254         l.j     99b                     // should never happen
1255         l.nop   1
1256
1257         // r2 is EEA
1258         // r3 is trampoline address (physical)
1259         // r4 is instruction
1260         // r6 is physical(EEA)
1261         //
1262         // r5
1263
1264 2:      // l.jal
1265
1266         /* 19 20 aa aa  l.movhi r9,0xaaaa
1267          * a9 29 bb bb  l.ori   r9,0xbbbb
1268          *
1269          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1270          */
1271
1272         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1273
1274                                         // l.movhi r9,0xaaaa
1275         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1276         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1277         l.srli  r5,r6,16
1278         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1279
1280                                         // l.ori   r9,0xbbbb
1281         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1282         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1283         l.andi  r5,r6,0xffff
1284         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1285
1286         /* falthrough, need to set up new jump offset */
1287
1288
1289 0:      // l.j
1290         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1291 //      l.srli  r6,r6,6                 // original offset shifted right 2
1292
1293         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1294 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1295
1296         l.addi  r5,r3,0xc               // new jump position (physical)
1297         l.slli  r5,r5,4                 // new jump position: shifted left 4
1298
1299         // calculate new jump offset
1300         // new_off = old_off + (old_jump - new_jump)
1301
1302         l.sub   r5,r4,r5                // old_jump - new_jump
1303         l.add   r5,r6,r5                // orig_off + (old_jump - new_jump)
1304         l.srli  r5,r5,6                 // new offset shifted right 2
1305
1306         // r5 is new jump offset
1307                                         // l.j has opcode 0x0...
1308         l.sw    TRAMP_SLOT_2(r3),r5     // write it back
1309
1310         l.j     trampoline_out
1311         l.nop
1312
1313 /* ----------------------------- */
1314
1315 3:      // l.jalr
1316
1317         /* 19 20 aa aa  l.movhi r9,0xaaaa
1318          * a9 29 bb bb  l.ori   r9,0xbbbb
1319          *
1320          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1321          */
1322
1323         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1324
1325                                         // l.movhi r9,0xaaaa
1326         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1327         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1328         l.srli  r5,r6,16
1329         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1330
1331                                         // l.ori   r9,0xbbbb
1332         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1333         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1334         l.andi  r5,r6,0xffff
1335         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1336
1337         l.lhz   r5,(TRAMP_SLOT_2+0x0)(r3)       // load hi part of jump instruction
1338         l.andi  r5,r5,0x3ff             // clear out opcode part
1339         l.ori   r5,r5,0x4400            // opcode changed from l.jalr -> l.jr
1340         l.sh    (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
1341
1342         /* falthrough */
1343
1344 1:      // l.jr
1345         l.j     trampoline_out
1346         l.nop
1347
1348 /* ----------------------------- */
1349
1350 4:      // l.bnf
1351 5:      // l.bf
1352         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1353 //      l.srli  r6,r6,6                 // original offset shifted right 2
1354
1355         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1356 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1357
1358         l.addi  r5,r3,0xc               // new jump position (physical)
1359         l.slli  r5,r5,4                 // new jump position: shifted left 4
1360
1361         // calculate new jump offset
1362         // new_off = old_off + (old_jump - new_jump)
1363
1364         l.add   r6,r6,r4                // (orig_off + old_jump)
1365         l.sub   r6,r6,r5                // (orig_off + old_jump) - new_jump
1366         l.srli  r6,r6,6                 // new offset shifted right 2
1367
1368         // r6 is new jump offset
1369         l.lwz   r4,(TRAMP_SLOT_2+0x0)(r3)       // load jump instruction
1370         l.srli  r4,r4,16
1371         l.andi  r4,r4,0xfc00            // get opcode part
1372         l.slli  r4,r4,16
1373         l.or    r6,r4,r6                // l.b(n)f new offset
1374         l.sw    TRAMP_SLOT_2(r3),r6     // write it back
1375
1376         /* we need to add l.j to EEA + 0x8 */
1377         tophys  (r4,r2)                 // may not be needed (due to shifts down_
1378         l.addi  r4,r4,(0x8 - 0x8)       // jump target = r2 + 0x8 (compensate for 0x8)
1379                                         // jump position = r5 + 0x8 (0x8 compensated)
1380         l.sub   r4,r4,r5                // jump offset = target - new_position + 0x8
1381
1382         l.slli  r4,r4,4                 // the amount of info in imediate of jump
1383         l.srli  r4,r4,6                 // jump instruction with offset
1384         l.sw    TRAMP_SLOT_4(r3),r4     // write it to 4th slot
1385
1386         /* fallthrough */
1387
1388 trampoline_out:
1389         // set up new EPC to point to our trampoline code
1390         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1391         l.mtspr r0,r5,SPR_EPCR_BASE
1392
1393         // immu_trampoline is (4x) CACHE_LINE aligned
1394         // and only 6 instructions long,
1395         // so we need to invalidate only 2 lines
1396
1397         /* Establish cache block size
1398            If BS=0, 16;
1399            If BS=1, 32;
1400            r14 contain block size
1401         */
1402         l.mfspr r21,r0,SPR_ICCFGR
1403         l.andi  r21,r21,SPR_ICCFGR_CBS
1404         l.srli  r21,r21,7
1405         l.ori   r23,r0,16
1406         l.sll   r14,r23,r21
1407
1408         l.mtspr r0,r5,SPR_ICBIR
1409         l.add   r5,r5,r14
1410         l.mtspr r0,r5,SPR_ICBIR
1411
1412         l.jr    r9
1413         l.nop
1414
1415
1416 /*
1417  * DSCR: prints a string referenced by r3.
1418  *
1419  * PRMS: r3             - address of the first character of null
1420  *                      terminated string to be printed
1421  *
1422  * PREQ: UART at UART_BASE_ADD has to be initialized
1423  *
1424  * POST: caller should be aware that r3, r9 are changed
1425  */
1426 ENTRY(_emergency_print)
1427         EMERGENCY_PRINT_STORE_GPR4
1428         EMERGENCY_PRINT_STORE_GPR5
1429         EMERGENCY_PRINT_STORE_GPR6
1430         EMERGENCY_PRINT_STORE_GPR7
1431 2:
1432         l.lbz   r7,0(r3)
1433         l.sfeq  r7,r0
1434         l.bf    9f
1435         l.nop
1436
1437 // putc:
1438         l.movhi r4,hi(UART_BASE_ADD)
1439
1440         l.addi  r6,r0,0x20
1441 1:      l.lbz   r5,5(r4)
1442         l.andi  r5,r5,0x20
1443         l.sfeq  r5,r6
1444         l.bnf   1b
1445         l.nop
1446
1447         l.sb    0(r4),r7
1448
1449         l.addi  r6,r0,0x60
1450 1:      l.lbz   r5,5(r4)
1451         l.andi  r5,r5,0x60
1452         l.sfeq  r5,r6
1453         l.bnf   1b
1454         l.nop
1455
1456         /* next character */
1457         l.j     2b
1458         l.addi  r3,r3,0x1
1459
1460 9:
1461         EMERGENCY_PRINT_LOAD_GPR7
1462         EMERGENCY_PRINT_LOAD_GPR6
1463         EMERGENCY_PRINT_LOAD_GPR5
1464         EMERGENCY_PRINT_LOAD_GPR4
1465         l.jr    r9
1466         l.nop
1467
1468 ENTRY(_emergency_print_nr)
1469         EMERGENCY_PRINT_STORE_GPR4
1470         EMERGENCY_PRINT_STORE_GPR5
1471         EMERGENCY_PRINT_STORE_GPR6
1472         EMERGENCY_PRINT_STORE_GPR7
1473         EMERGENCY_PRINT_STORE_GPR8
1474
1475         l.addi  r8,r0,32                // shift register
1476
1477 1:      /* remove leading zeros */
1478         l.addi  r8,r8,-0x4
1479         l.srl   r7,r3,r8
1480         l.andi  r7,r7,0xf
1481
1482         /* don't skip the last zero if number == 0x0 */
1483         l.sfeqi r8,0x4
1484         l.bf    2f
1485         l.nop
1486
1487         l.sfeq  r7,r0
1488         l.bf    1b
1489         l.nop
1490
1491 2:
1492         l.srl   r7,r3,r8
1493
1494         l.andi  r7,r7,0xf
1495         l.sflts r8,r0
1496         l.bf    9f
1497
1498         l.sfgtui r7,0x9
1499         l.bnf   8f
1500         l.nop
1501         l.addi  r7,r7,0x27
1502
1503 8:
1504         l.addi  r7,r7,0x30
1505 // putc:
1506         l.movhi r4,hi(UART_BASE_ADD)
1507
1508         l.addi  r6,r0,0x20
1509 1:      l.lbz   r5,5(r4)
1510         l.andi  r5,r5,0x20
1511         l.sfeq  r5,r6
1512         l.bnf   1b
1513         l.nop
1514
1515         l.sb    0(r4),r7
1516
1517         l.addi  r6,r0,0x60
1518 1:      l.lbz   r5,5(r4)
1519         l.andi  r5,r5,0x60
1520         l.sfeq  r5,r6
1521         l.bnf   1b
1522         l.nop
1523
1524         /* next character */
1525         l.j     2b
1526         l.addi  r8,r8,-0x4
1527
1528 9:
1529         EMERGENCY_PRINT_LOAD_GPR8
1530         EMERGENCY_PRINT_LOAD_GPR7
1531         EMERGENCY_PRINT_LOAD_GPR6
1532         EMERGENCY_PRINT_LOAD_GPR5
1533         EMERGENCY_PRINT_LOAD_GPR4
1534         l.jr    r9
1535         l.nop
1536
1537
1538 /*
1539  * This should be used for debugging only.
1540  * It messes up the Linux early serial output
1541  * somehow, so use it sparingly and essentially
1542  * only if you need to debug something that goes wrong
1543  * before Linux gets the early serial going.
1544  *
1545  * Furthermore, you'll have to make sure you set the
1546  * UART_DEVISOR correctly according to the system
1547  * clock rate.
1548  *
1549  *
1550  */
1551
1552
1553
1554 #define SYS_CLK            20000000
1555 //#define SYS_CLK            1843200
1556 #define OR32_CONSOLE_BAUD  115200
1557 #define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
1558
1559 ENTRY(_early_uart_init)
1560         l.movhi r3,hi(UART_BASE_ADD)
1561
1562         l.addi  r4,r0,0x7
1563         l.sb    0x2(r3),r4
1564
1565         l.addi  r4,r0,0x0
1566         l.sb    0x1(r3),r4
1567
1568         l.addi  r4,r0,0x3
1569         l.sb    0x3(r3),r4
1570
1571         l.lbz   r5,3(r3)
1572         l.ori   r4,r5,0x80
1573         l.sb    0x3(r3),r4
1574         l.addi  r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
1575         l.sb    UART_DLM(r3),r4
1576         l.addi  r4,r0,((UART_DIVISOR) & 0x000000ff)
1577         l.sb    UART_DLL(r3),r4
1578         l.sb    0x3(r3),r5
1579
1580         l.jr    r9
1581         l.nop
1582
1583 _string_copying_linux:
1584         .string "\n\n\n\n\n\rCopying Linux... \0"
1585
1586 _string_ok_booting:
1587         .string "Ok, booting the kernel.\n\r\0"
1588
1589 _string_unhandled_exception:
1590         .string "\n\rRunarunaround: Unhandled exception 0x\0"
1591
1592 _string_epc_prefix:
1593         .string ": EPC=0x\0"
1594
1595 _string_nl:
1596         .string "\n\r\0"
1597
1598         .global _string_esr_irq_bug
1599 _string_esr_irq_bug:
1600         .string "\n\rESR external interrupt bug, for details look into entry.S\n\r\0"
1601
1602
1603
1604 /* ========================================[ page aligned structures ]=== */
1605
1606 /*
1607  * .data section should be page aligned
1608  *      (look into arch/openrisc/kernel/vmlinux.lds.S)
1609  */
1610         .section .data,"aw"
1611         .align  8192
1612         .global  empty_zero_page
1613 empty_zero_page:
1614         .space  8192
1615
1616         .global  swapper_pg_dir
1617 swapper_pg_dir:
1618         .space  8192
1619
1620         .global _unhandled_stack
1621 _unhandled_stack:
1622         .space  8192
1623 _unhandled_stack_top:
1624
1625 /* ============================================================[ EOF ]=== */