GNU Linux-libre 4.14.313-gnu1
[releases.git] / arch / openrisc / kernel / head.S
1 /*
2  * OpenRISC head.S
3  *
4  * Linux architectural port borrowing liberally from similar works of
5  * others.  All original copyrights apply as per the original source
6  * declaration.
7  *
8  * Modifications for the OpenRISC architecture:
9  * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11  *
12  *      This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17
18 #include <linux/linkage.h>
19 #include <linux/threads.h>
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/serial_reg.h>
23 #include <asm/processor.h>
24 #include <asm/page.h>
25 #include <asm/mmu.h>
26 #include <asm/pgtable.h>
27 #include <asm/thread_info.h>
28 #include <asm/cache.h>
29 #include <asm/spr_defs.h>
30 #include <asm/asm-offsets.h>
31 #include <linux/of_fdt.h>
32
33 #define tophys(rd,rs)                           \
34         l.movhi rd,hi(-KERNELBASE)              ;\
35         l.add   rd,rd,rs
36
37 #define CLEAR_GPR(gpr)                          \
38         l.movhi gpr,0x0
39
40 #define LOAD_SYMBOL_2_GPR(gpr,symbol)           \
41         l.movhi gpr,hi(symbol)                  ;\
42         l.ori   gpr,gpr,lo(symbol)
43
44
45 #define UART_BASE_ADD      0x90000000
46
47 #define EXCEPTION_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
48 #define SYSCALL_SR  (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
49
50 /* ============================================[ tmp store locations ]=== */
51
52 /*
53  * emergency_print temporary stores
54  */
55 #define EMERGENCY_PRINT_STORE_GPR4      l.sw    0x20(r0),r4
56 #define EMERGENCY_PRINT_LOAD_GPR4       l.lwz   r4,0x20(r0)
57
58 #define EMERGENCY_PRINT_STORE_GPR5      l.sw    0x24(r0),r5
59 #define EMERGENCY_PRINT_LOAD_GPR5       l.lwz   r5,0x24(r0)
60
61 #define EMERGENCY_PRINT_STORE_GPR6      l.sw    0x28(r0),r6
62 #define EMERGENCY_PRINT_LOAD_GPR6       l.lwz   r6,0x28(r0)
63
64 #define EMERGENCY_PRINT_STORE_GPR7      l.sw    0x2c(r0),r7
65 #define EMERGENCY_PRINT_LOAD_GPR7       l.lwz   r7,0x2c(r0)
66
67 #define EMERGENCY_PRINT_STORE_GPR8      l.sw    0x30(r0),r8
68 #define EMERGENCY_PRINT_LOAD_GPR8       l.lwz   r8,0x30(r0)
69
70 #define EMERGENCY_PRINT_STORE_GPR9      l.sw    0x34(r0),r9
71 #define EMERGENCY_PRINT_LOAD_GPR9       l.lwz   r9,0x34(r0)
72
73
74 /*
75  * TLB miss handlers temorary stores
76  */
77 #define EXCEPTION_STORE_GPR9            l.sw    0x10(r0),r9
78 #define EXCEPTION_LOAD_GPR9             l.lwz   r9,0x10(r0)
79
80 #define EXCEPTION_STORE_GPR2            l.sw    0x64(r0),r2
81 #define EXCEPTION_LOAD_GPR2             l.lwz   r2,0x64(r0)
82
83 #define EXCEPTION_STORE_GPR3            l.sw    0x68(r0),r3
84 #define EXCEPTION_LOAD_GPR3             l.lwz   r3,0x68(r0)
85
86 #define EXCEPTION_STORE_GPR4            l.sw    0x6c(r0),r4
87 #define EXCEPTION_LOAD_GPR4             l.lwz   r4,0x6c(r0)
88
89 #define EXCEPTION_STORE_GPR5            l.sw    0x70(r0),r5
90 #define EXCEPTION_LOAD_GPR5             l.lwz   r5,0x70(r0)
91
92 #define EXCEPTION_STORE_GPR6            l.sw    0x74(r0),r6
93 #define EXCEPTION_LOAD_GPR6             l.lwz   r6,0x74(r0)
94
95
96 /*
97  * EXCEPTION_HANDLE temporary stores
98  */
99
100 #define EXCEPTION_T_STORE_GPR30         l.sw    0x78(r0),r30
101 #define EXCEPTION_T_LOAD_GPR30(reg)     l.lwz   reg,0x78(r0)
102
103 #define EXCEPTION_T_STORE_GPR10         l.sw    0x7c(r0),r10
104 #define EXCEPTION_T_LOAD_GPR10(reg)     l.lwz   reg,0x7c(r0)
105
106 #define EXCEPTION_T_STORE_SP            l.sw    0x80(r0),r1
107 #define EXCEPTION_T_LOAD_SP(reg)        l.lwz   reg,0x80(r0)
108
109 /*
110  * For UNHANLDED_EXCEPTION
111  */
112
113 #define EXCEPTION_T_STORE_GPR31         l.sw    0x84(r0),r31
114 #define EXCEPTION_T_LOAD_GPR31(reg)     l.lwz   reg,0x84(r0)
115
116 /* =========================================================[ macros ]=== */
117
118
119 #define GET_CURRENT_PGD(reg,t1)                                 \
120         LOAD_SYMBOL_2_GPR(reg,current_pgd)                      ;\
121         tophys  (t1,reg)                                        ;\
122         l.lwz   reg,0(t1)
123
124
125 /*
126  * DSCR: this is a common hook for handling exceptions. it will save
127  *       the needed registers, set up stack and pointer to current
128  *       then jump to the handler while enabling MMU
129  *
130  * PRMS: handler        - a function to jump to. it has to save the
131  *                      remaining registers to kernel stack, call
132  *                      appropriate arch-independant exception handler
133  *                      and finaly jump to ret_from_except
134  *
135  * PREQ: unchanged state from the time exception happened
136  *
137  * POST: SAVED the following registers original value
138  *             to the new created exception frame pointed to by r1
139  *
140  *       r1  - ksp      pointing to the new (exception) frame
141  *       r4  - EEAR     exception EA
142  *       r10 - current  pointing to current_thread_info struct
143  *       r12 - syscall  0, since we didn't come from syscall
144  *       r30 - handler  address of the handler we'll jump to
145  *
146  *       handler has to save remaining registers to the exception
147  *       ksp frame *before* tainting them!
148  *
149  * NOTE: this function is not reentrant per se. reentrancy is guaranteed
150  *       by processor disabling all exceptions/interrupts when exception
151  *       accours.
152  *
153  * OPTM: no need to make it so wasteful to extract ksp when in user mode
154  */
155
156 #define EXCEPTION_HANDLE(handler)                               \
157         EXCEPTION_T_STORE_GPR30                                 ;\
158         l.mfspr r30,r0,SPR_ESR_BASE                             ;\
159         l.andi  r30,r30,SPR_SR_SM                               ;\
160         l.sfeqi r30,0                                           ;\
161         EXCEPTION_T_STORE_GPR10                                 ;\
162         l.bnf   2f                            /* kernel_mode */ ;\
163          EXCEPTION_T_STORE_SP                 /* delay slot */  ;\
164 1: /* user_mode:   */                                           ;\
165         LOAD_SYMBOL_2_GPR(r1,current_thread_info_set)           ;\
166         tophys  (r30,r1)                                        ;\
167         /* r10: current_thread_info  */                         ;\
168         l.lwz   r10,0(r30)                                      ;\
169         tophys  (r30,r10)                                       ;\
170         l.lwz   r1,(TI_KSP)(r30)                                ;\
171         /* fall through */                                      ;\
172 2: /* kernel_mode: */                                           ;\
173         /* create new stack frame, save only needed gprs */     ;\
174         /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */   ;\
175         /* r12: temp, syscall indicator */                      ;\
176         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
177         /* r1 is KSP, r30 is __pa(KSP) */                       ;\
178         tophys  (r30,r1)                                        ;\
179         l.sw    PT_GPR12(r30),r12                               ;\
180         /* r4 use for tmp before EA */                          ;\
181         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
182         l.sw    PT_PC(r30),r12                                  ;\
183         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
184         l.sw    PT_SR(r30),r12                                  ;\
185         /* save r30 */                                          ;\
186         EXCEPTION_T_LOAD_GPR30(r12)                             ;\
187         l.sw    PT_GPR30(r30),r12                               ;\
188         /* save r10 as was prior to exception */                ;\
189         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
190         l.sw    PT_GPR10(r30),r12                               ;\
191         /* save PT_SP as was prior to exception */              ;\
192         EXCEPTION_T_LOAD_SP(r12)                                ;\
193         l.sw    PT_SP(r30),r12                                  ;\
194         /* save exception r4, set r4 = EA */                    ;\
195         l.sw    PT_GPR4(r30),r4                                 ;\
196         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
197         /* r12 == 1 if we come from syscall */                  ;\
198         CLEAR_GPR(r12)                                          ;\
199         /* ----- turn on MMU ----- */                           ;\
200         /* Carry DSX into exception SR */                       ;\
201         l.mfspr r30,r0,SPR_SR                                   ;\
202         l.andi  r30,r30,SPR_SR_DSX                              ;\
203         l.ori   r30,r30,(EXCEPTION_SR)                          ;\
204         l.mtspr r0,r30,SPR_ESR_BASE                             ;\
205         /* r30: EA address of handler */                        ;\
206         LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
207         l.mtspr r0,r30,SPR_EPCR_BASE                            ;\
208         l.rfe
209
210 /*
211  * this doesn't work
212  *
213  *
214  * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
215  * #define UNHANDLED_EXCEPTION(handler)                         \
216  *      l.ori   r3,r0,0x1                                       ;\
217  *      l.mtspr r0,r3,SPR_SR                                    ;\
218  *      l.movhi r3,hi(0xf0000100)                               ;\
219  *      l.ori   r3,r3,lo(0xf0000100)                            ;\
220  *      l.jr    r3                                              ;\
221  *      l.nop   1
222  *
223  * #endif
224  */
225
226 /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
227  *       a bit more carefull (if we have a PT_SP or current pointer
228  *       corruption) and set them up from 'current_set'
229  *
230  */
231 #define UNHANDLED_EXCEPTION(handler)                            \
232         EXCEPTION_T_STORE_GPR31                                 ;\
233         EXCEPTION_T_STORE_GPR10                                 ;\
234         EXCEPTION_T_STORE_SP                                    ;\
235         /* temporary store r3, r9 into r1, r10 */               ;\
236         l.addi  r1,r3,0x0                                       ;\
237         l.addi  r10,r9,0x0                                      ;\
238         /* the string referenced by r3 must be low enough */    ;\
239         l.jal   _emergency_print                                ;\
240         l.ori   r3,r0,lo(_string_unhandled_exception)           ;\
241         l.mfspr r3,r0,SPR_NPC                                   ;\
242         l.jal   _emergency_print_nr                             ;\
243         l.andi  r3,r3,0x1f00                                    ;\
244         /* the string referenced by r3 must be low enough */    ;\
245         l.jal   _emergency_print                                ;\
246         l.ori   r3,r0,lo(_string_epc_prefix)                    ;\
247         l.jal   _emergency_print_nr                             ;\
248         l.mfspr r3,r0,SPR_EPCR_BASE                             ;\
249         l.jal   _emergency_print                                ;\
250         l.ori   r3,r0,lo(_string_nl)                            ;\
251         /* end of printing */                                   ;\
252         l.addi  r3,r1,0x0                                       ;\
253         l.addi  r9,r10,0x0                                      ;\
254         /* extract current, ksp from current_set */             ;\
255         LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top)              ;\
256         LOAD_SYMBOL_2_GPR(r10,init_thread_union)                ;\
257         /* create new stack frame, save only needed gprs */     ;\
258         /* r1: KSP, r10: current, r31: __pa(KSP) */             ;\
259         /* r12: temp, syscall indicator, r13 temp */            ;\
260         l.addi  r1,r1,-(INT_FRAME_SIZE)                         ;\
261         /* r1 is KSP, r31 is __pa(KSP) */                       ;\
262         tophys  (r31,r1)                                        ;\
263         l.sw    PT_GPR12(r31),r12                                       ;\
264         l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
265         l.sw    PT_PC(r31),r12                                  ;\
266         l.mfspr r12,r0,SPR_ESR_BASE                             ;\
267         l.sw    PT_SR(r31),r12                                  ;\
268         /* save r31 */                                          ;\
269         EXCEPTION_T_LOAD_GPR31(r12)                             ;\
270         l.sw    PT_GPR31(r31),r12                                       ;\
271         /* save r10 as was prior to exception */                ;\
272         EXCEPTION_T_LOAD_GPR10(r12)                             ;\
273         l.sw    PT_GPR10(r31),r12                                       ;\
274         /* save PT_SP as was prior to exception */                      ;\
275         EXCEPTION_T_LOAD_SP(r12)                                ;\
276         l.sw    PT_SP(r31),r12                                  ;\
277         l.sw    PT_GPR13(r31),r13                                       ;\
278         /* --> */                                               ;\
279         /* save exception r4, set r4 = EA */                    ;\
280         l.sw    PT_GPR4(r31),r4                                 ;\
281         l.mfspr r4,r0,SPR_EEAR_BASE                             ;\
282         /* r12 == 1 if we come from syscall */                  ;\
283         CLEAR_GPR(r12)                                          ;\
284         /* ----- play a MMU trick ----- */                      ;\
285         l.ori   r31,r0,(EXCEPTION_SR)                           ;\
286         l.mtspr r0,r31,SPR_ESR_BASE                             ;\
287         /* r31: EA address of handler */                        ;\
288         LOAD_SYMBOL_2_GPR(r31,handler)                          ;\
289         l.mtspr r0,r31,SPR_EPCR_BASE                            ;\
290         l.rfe
291
292 /* =====================================================[ exceptions] === */
293
294 /* ---[ 0x100: RESET exception ]----------------------------------------- */
295     .org 0x100
296         /* Jump to .init code at _start which lives in the .head section
297          * and will be discarded after boot.
298          */
299         LOAD_SYMBOL_2_GPR(r15, _start)
300         tophys  (r13,r15)                       /* MMU disabled */
301         l.jr    r13
302          l.nop
303
304 /* ---[ 0x200: BUS exception ]------------------------------------------- */
305     .org 0x200
306 _dispatch_bus_fault:
307         EXCEPTION_HANDLE(_bus_fault_handler)
308
309 /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
310     .org 0x300
311 _dispatch_do_dpage_fault:
312 //      totaly disable timer interrupt
313 //      l.mtspr r0,r0,SPR_TTMR
314 //      DEBUG_TLB_PROBE(0x300)
315 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
316         EXCEPTION_HANDLE(_data_page_fault_handler)
317
318 /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
319     .org 0x400
320 _dispatch_do_ipage_fault:
321 //      totaly disable timer interrupt
322 //      l.mtspr r0,r0,SPR_TTMR
323 //      DEBUG_TLB_PROBE(0x400)
324 //      EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
325         EXCEPTION_HANDLE(_insn_page_fault_handler)
326
327 /* ---[ 0x500: Timer exception ]----------------------------------------- */
328     .org 0x500
329         EXCEPTION_HANDLE(_timer_handler)
330
331 /* ---[ 0x600: Alignment exception ]-------------------------------------- */
332     .org 0x600
333         EXCEPTION_HANDLE(_alignment_handler)
334
335 /* ---[ 0x700: Illegal insn exception ]---------------------------------- */
336     .org 0x700
337         EXCEPTION_HANDLE(_illegal_instruction_handler)
338
339 /* ---[ 0x800: External interrupt exception ]---------------------------- */
340     .org 0x800
341         EXCEPTION_HANDLE(_external_irq_handler)
342
343 /* ---[ 0x900: DTLB miss exception ]------------------------------------- */
344     .org 0x900
345         l.j     boot_dtlb_miss_handler
346         l.nop
347
348 /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
349     .org 0xa00
350         l.j     boot_itlb_miss_handler
351         l.nop
352
353 /* ---[ 0xb00: Range exception ]----------------------------------------- */
354     .org 0xb00
355         UNHANDLED_EXCEPTION(_vector_0xb00)
356
357 /* ---[ 0xc00: Syscall exception ]--------------------------------------- */
358     .org 0xc00
359         EXCEPTION_HANDLE(_sys_call_handler)
360
361 /* ---[ 0xd00: Trap exception ]------------------------------------------ */
362     .org 0xd00
363         UNHANDLED_EXCEPTION(_vector_0xd00)
364
365 /* ---[ 0xe00: Trap exception ]------------------------------------------ */
366     .org 0xe00
367 //      UNHANDLED_EXCEPTION(_vector_0xe00)
368         EXCEPTION_HANDLE(_trap_handler)
369
370 /* ---[ 0xf00: Reserved exception ]-------------------------------------- */
371     .org 0xf00
372         UNHANDLED_EXCEPTION(_vector_0xf00)
373
374 /* ---[ 0x1000: Reserved exception ]------------------------------------- */
375     .org 0x1000
376         UNHANDLED_EXCEPTION(_vector_0x1000)
377
378 /* ---[ 0x1100: Reserved exception ]------------------------------------- */
379     .org 0x1100
380         UNHANDLED_EXCEPTION(_vector_0x1100)
381
382 /* ---[ 0x1200: Reserved exception ]------------------------------------- */
383     .org 0x1200
384         UNHANDLED_EXCEPTION(_vector_0x1200)
385
386 /* ---[ 0x1300: Reserved exception ]------------------------------------- */
387     .org 0x1300
388         UNHANDLED_EXCEPTION(_vector_0x1300)
389
390 /* ---[ 0x1400: Reserved exception ]------------------------------------- */
391     .org 0x1400
392         UNHANDLED_EXCEPTION(_vector_0x1400)
393
394 /* ---[ 0x1500: Reserved exception ]------------------------------------- */
395     .org 0x1500
396         UNHANDLED_EXCEPTION(_vector_0x1500)
397
398 /* ---[ 0x1600: Reserved exception ]------------------------------------- */
399     .org 0x1600
400         UNHANDLED_EXCEPTION(_vector_0x1600)
401
402 /* ---[ 0x1700: Reserved exception ]------------------------------------- */
403     .org 0x1700
404         UNHANDLED_EXCEPTION(_vector_0x1700)
405
406 /* ---[ 0x1800: Reserved exception ]------------------------------------- */
407     .org 0x1800
408         UNHANDLED_EXCEPTION(_vector_0x1800)
409
410 /* ---[ 0x1900: Reserved exception ]------------------------------------- */
411     .org 0x1900
412         UNHANDLED_EXCEPTION(_vector_0x1900)
413
414 /* ---[ 0x1a00: Reserved exception ]------------------------------------- */
415     .org 0x1a00
416         UNHANDLED_EXCEPTION(_vector_0x1a00)
417
418 /* ---[ 0x1b00: Reserved exception ]------------------------------------- */
419     .org 0x1b00
420         UNHANDLED_EXCEPTION(_vector_0x1b00)
421
422 /* ---[ 0x1c00: Reserved exception ]------------------------------------- */
423     .org 0x1c00
424         UNHANDLED_EXCEPTION(_vector_0x1c00)
425
426 /* ---[ 0x1d00: Reserved exception ]------------------------------------- */
427     .org 0x1d00
428         UNHANDLED_EXCEPTION(_vector_0x1d00)
429
430 /* ---[ 0x1e00: Reserved exception ]------------------------------------- */
431     .org 0x1e00
432         UNHANDLED_EXCEPTION(_vector_0x1e00)
433
434 /* ---[ 0x1f00: Reserved exception ]------------------------------------- */
435     .org 0x1f00
436         UNHANDLED_EXCEPTION(_vector_0x1f00)
437
438     .org 0x2000
439 /* ===================================================[ kernel start ]=== */
440
441 /*    .text*/
442
443 /* This early stuff belongs in HEAD, but some of the functions below definitely
444  * don't... */
445
446         __HEAD
447         .global _start
448 _start:
449         /* Init r0 to zero as per spec */
450         CLEAR_GPR(r0)
451
452         /* save kernel parameters */
453         l.or    r25,r0,r3       /* pointer to fdt */
454
455         /*
456          * ensure a deterministic start
457          */
458
459         l.ori   r3,r0,0x1
460         l.mtspr r0,r3,SPR_SR
461
462         /*
463          * Start the TTCR as early as possible, so that the RNG can make use of
464          * measurements of boot time from the earliest opportunity. Especially
465          * important is that the TTCR does not return zero by the time we reach
466          * rand_initialize().
467          */
468         l.movhi r3,hi(SPR_TTMR_CR)
469         l.mtspr r0,r3,SPR_TTMR
470
471         CLEAR_GPR(r1)
472         CLEAR_GPR(r2)
473         CLEAR_GPR(r3)
474         CLEAR_GPR(r4)
475         CLEAR_GPR(r5)
476         CLEAR_GPR(r6)
477         CLEAR_GPR(r7)
478         CLEAR_GPR(r8)
479         CLEAR_GPR(r9)
480         CLEAR_GPR(r10)
481         CLEAR_GPR(r11)
482         CLEAR_GPR(r12)
483         CLEAR_GPR(r13)
484         CLEAR_GPR(r14)
485         CLEAR_GPR(r15)
486         CLEAR_GPR(r16)
487         CLEAR_GPR(r17)
488         CLEAR_GPR(r18)
489         CLEAR_GPR(r19)
490         CLEAR_GPR(r20)
491         CLEAR_GPR(r21)
492         CLEAR_GPR(r22)
493         CLEAR_GPR(r23)
494         CLEAR_GPR(r24)
495         CLEAR_GPR(r26)
496         CLEAR_GPR(r27)
497         CLEAR_GPR(r28)
498         CLEAR_GPR(r29)
499         CLEAR_GPR(r30)
500         CLEAR_GPR(r31)
501
502         /*
503          * set up initial ksp and current
504          */
505         /* setup kernel stack */
506         LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
507         LOAD_SYMBOL_2_GPR(r10,init_thread_union)        // setup current
508         tophys  (r31,r10)
509         l.sw    TI_KSP(r31), r1
510
511         l.ori   r4,r0,0x0
512
513
514         /*
515          * .data contains initialized data,
516          * .bss contains uninitialized data - clear it up
517          */
518 clear_bss:
519         LOAD_SYMBOL_2_GPR(r24, __bss_start)
520         LOAD_SYMBOL_2_GPR(r26, _end)
521         tophys(r28,r24)
522         tophys(r30,r26)
523         CLEAR_GPR(r24)
524         CLEAR_GPR(r26)
525 1:
526         l.sw    (0)(r28),r0
527         l.sfltu r28,r30
528         l.bf    1b
529         l.addi  r28,r28,4
530
531 enable_ic:
532         l.jal   _ic_enable
533          l.nop
534
535 enable_dc:
536         l.jal   _dc_enable
537          l.nop
538
539 flush_tlb:
540         l.jal   _flush_tlb
541          l.nop
542
543 /* The MMU needs to be enabled before or32_early_setup is called */
544
545 enable_mmu:
546         /*
547          * enable dmmu & immu
548          * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0
549          */
550         l.mfspr r30,r0,SPR_SR
551         l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
552         l.ori   r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
553         l.or    r30,r30,r28
554         l.mtspr r0,r30,SPR_SR
555         l.nop
556         l.nop
557         l.nop
558         l.nop
559         l.nop
560         l.nop
561         l.nop
562         l.nop
563         l.nop
564         l.nop
565         l.nop
566         l.nop
567         l.nop
568         l.nop
569         l.nop
570         l.nop
571
572         // reset the simulation counters
573         l.nop 5
574
575         /* check fdt header magic word */
576         l.lwz   r3,0(r25)       /* load magic from fdt into r3 */
577         l.movhi r4,hi(OF_DT_HEADER)
578         l.ori   r4,r4,lo(OF_DT_HEADER)
579         l.sfeq  r3,r4
580         l.bf    _fdt_found
581          l.nop
582         /* magic number mismatch, set fdt pointer to null */
583         l.or    r25,r0,r0
584 _fdt_found:
585         /* pass fdt pointer to or32_early_setup in r3 */
586         l.or    r3,r0,r25
587         LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
588         l.jalr r24
589          l.nop
590
591 clear_regs:
592         /*
593          * clear all GPRS to increase determinism
594          */
595         CLEAR_GPR(r2)
596         CLEAR_GPR(r3)
597         CLEAR_GPR(r4)
598         CLEAR_GPR(r5)
599         CLEAR_GPR(r6)
600         CLEAR_GPR(r7)
601         CLEAR_GPR(r8)
602         CLEAR_GPR(r9)
603         CLEAR_GPR(r11)
604         CLEAR_GPR(r12)
605         CLEAR_GPR(r13)
606         CLEAR_GPR(r14)
607         CLEAR_GPR(r15)
608         CLEAR_GPR(r16)
609         CLEAR_GPR(r17)
610         CLEAR_GPR(r18)
611         CLEAR_GPR(r19)
612         CLEAR_GPR(r20)
613         CLEAR_GPR(r21)
614         CLEAR_GPR(r22)
615         CLEAR_GPR(r23)
616         CLEAR_GPR(r24)
617         CLEAR_GPR(r25)
618         CLEAR_GPR(r26)
619         CLEAR_GPR(r27)
620         CLEAR_GPR(r28)
621         CLEAR_GPR(r29)
622         CLEAR_GPR(r30)
623         CLEAR_GPR(r31)
624
625 jump_start_kernel:
626         /*
627          * jump to kernel entry (start_kernel)
628          */
629         LOAD_SYMBOL_2_GPR(r30, start_kernel)
630         l.jr    r30
631          l.nop
632
633 _flush_tlb:
634         /*
635          *  I N V A L I D A T E   T L B   e n t r i e s
636          */
637         LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
638         LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
639         l.addi  r7,r0,128 /* Maximum number of sets */
640 1:
641         l.mtspr r5,r0,0x0
642         l.mtspr r6,r0,0x0
643
644         l.addi  r5,r5,1
645         l.addi  r6,r6,1
646         l.sfeq  r7,r0
647         l.bnf   1b
648          l.addi r7,r7,-1
649
650         l.jr    r9
651          l.nop
652
653 /* ========================================[ cache ]=== */
654
655         /* alignment here so we don't change memory offsets with
656          * memory controller defined
657          */
658         .align 0x2000
659
660 _ic_enable:
661         /* Check if IC present and skip enabling otherwise */
662         l.mfspr r24,r0,SPR_UPR
663         l.andi  r26,r24,SPR_UPR_ICP
664         l.sfeq  r26,r0
665         l.bf    9f
666         l.nop
667
668         /* Disable IC */
669         l.mfspr r6,r0,SPR_SR
670         l.addi  r5,r0,-1
671         l.xori  r5,r5,SPR_SR_ICE
672         l.and   r5,r6,r5
673         l.mtspr r0,r5,SPR_SR
674
675         /* Establish cache block size
676            If BS=0, 16;
677            If BS=1, 32;
678            r14 contain block size
679         */
680         l.mfspr r24,r0,SPR_ICCFGR
681         l.andi  r26,r24,SPR_ICCFGR_CBS
682         l.srli  r28,r26,7
683         l.ori   r30,r0,16
684         l.sll   r14,r30,r28
685
686         /* Establish number of cache sets
687            r16 contains number of cache sets
688            r28 contains log(# of cache sets)
689         */
690         l.andi  r26,r24,SPR_ICCFGR_NCS
691         l.srli  r28,r26,3
692         l.ori   r30,r0,1
693         l.sll   r16,r30,r28
694
695         /* Invalidate IC */
696         l.addi  r6,r0,0
697         l.sll   r5,r14,r28
698 //        l.mul   r5,r14,r16
699 //      l.trap  1
700 //      l.addi  r5,r0,IC_SIZE
701 1:
702         l.mtspr r0,r6,SPR_ICBIR
703         l.sfne  r6,r5
704         l.bf    1b
705         l.add   r6,r6,r14
706  //       l.addi   r6,r6,IC_LINE
707
708         /* Enable IC */
709         l.mfspr r6,r0,SPR_SR
710         l.ori   r6,r6,SPR_SR_ICE
711         l.mtspr r0,r6,SPR_SR
712         l.nop
713         l.nop
714         l.nop
715         l.nop
716         l.nop
717         l.nop
718         l.nop
719         l.nop
720         l.nop
721         l.nop
722 9:
723         l.jr    r9
724         l.nop
725
726 _dc_enable:
727         /* Check if DC present and skip enabling otherwise */
728         l.mfspr r24,r0,SPR_UPR
729         l.andi  r26,r24,SPR_UPR_DCP
730         l.sfeq  r26,r0
731         l.bf    9f
732         l.nop
733
734         /* Disable DC */
735         l.mfspr r6,r0,SPR_SR
736         l.addi  r5,r0,-1
737         l.xori  r5,r5,SPR_SR_DCE
738         l.and   r5,r6,r5
739         l.mtspr r0,r5,SPR_SR
740
741         /* Establish cache block size
742            If BS=0, 16;
743            If BS=1, 32;
744            r14 contain block size
745         */
746         l.mfspr r24,r0,SPR_DCCFGR
747         l.andi  r26,r24,SPR_DCCFGR_CBS
748         l.srli  r28,r26,7
749         l.ori   r30,r0,16
750         l.sll   r14,r30,r28
751
752         /* Establish number of cache sets
753            r16 contains number of cache sets
754            r28 contains log(# of cache sets)
755         */
756         l.andi  r26,r24,SPR_DCCFGR_NCS
757         l.srli  r28,r26,3
758         l.ori   r30,r0,1
759         l.sll   r16,r30,r28
760
761         /* Invalidate DC */
762         l.addi  r6,r0,0
763         l.sll   r5,r14,r28
764 1:
765         l.mtspr r0,r6,SPR_DCBIR
766         l.sfne  r6,r5
767         l.bf    1b
768         l.add   r6,r6,r14
769
770         /* Enable DC */
771         l.mfspr r6,r0,SPR_SR
772         l.ori   r6,r6,SPR_SR_DCE
773         l.mtspr r0,r6,SPR_SR
774 9:
775         l.jr    r9
776         l.nop
777
778 /* ===============================================[ page table masks ]=== */
779
780 #define DTLB_UP_CONVERT_MASK  0x3fa
781 #define ITLB_UP_CONVERT_MASK  0x3a
782
783 /* for SMP we'd have (this is a bit subtle, CC must be always set
784  * for SMP, but since we have _PAGE_PRESENT bit always defined
785  * we can just modify the mask)
786  */
787 #define DTLB_SMP_CONVERT_MASK  0x3fb
788 #define ITLB_SMP_CONVERT_MASK  0x3b
789
790 /* ---[ boot dtlb miss handler ]----------------------------------------- */
791
792 boot_dtlb_miss_handler:
793
794 /* mask for DTLB_MR register: - (0) sets V (valid) bit,
795  *                            - (31-12) sets bits belonging to VPN (31-12)
796  */
797 #define DTLB_MR_MASK 0xfffff001
798
799 /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
800  *                            - (4) sets A (access) bit,
801  *                            - (5) sets D (dirty) bit,
802  *                            - (8) sets SRE (superuser read) bit
803  *                            - (9) sets SWE (superuser write) bit
804  *                            - (31-12) sets bits belonging to VPN (31-12)
805  */
806 #define DTLB_TR_MASK 0xfffff332
807
808 /* These are for masking out the VPN/PPN value from the MR/TR registers...
809  * it's not the same as the PFN */
810 #define VPN_MASK 0xfffff000
811 #define PPN_MASK 0xfffff000
812
813
814         EXCEPTION_STORE_GPR6
815
816 #if 0
817         l.mfspr r6,r0,SPR_ESR_BASE         //
818         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
819         l.sfeqi r6,0                       // r6 == 0x1 --> SM
820         l.bf    exit_with_no_dtranslation  //
821         l.nop
822 #endif
823
824         /* this could be optimized by moving storing of
825          * non r6 registers here, and jumping r6 restore
826          * if not in supervisor mode
827          */
828
829         EXCEPTION_STORE_GPR2
830         EXCEPTION_STORE_GPR3
831         EXCEPTION_STORE_GPR4
832         EXCEPTION_STORE_GPR5
833
834         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
835
836 immediate_translation:
837         CLEAR_GPR(r6)
838
839         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
840
841         l.mfspr r6, r0, SPR_DMMUCFGR
842         l.andi  r6, r6, SPR_DMMUCFGR_NTS
843         l.srli  r6, r6, SPR_DMMUCFGR_NTS_OFF
844         l.ori   r5, r0, 0x1
845         l.sll   r5, r5, r6      // r5 = number DMMU sets
846         l.addi  r6, r5, -1      // r6 = nsets mask
847         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
848
849         l.or    r6,r6,r4                   // r6 <- r4
850         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
851         l.movhi r5,hi(DTLB_MR_MASK)        // r5 <- ffff:0000.x000
852         l.ori   r5,r5,lo(DTLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
853         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have DTLBMR entry
854         l.mtspr r2,r5,SPR_DTLBMR_BASE(0)   // set DTLBMR
855
856         /* set up DTLB with no translation for EA <= 0xbfffffff */
857         LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
858         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xbfffffff >= EA)
859         l.bf     1f                        // goto out
860         l.and    r3,r4,r4                  // delay slot :: 24 <- r4 (if flag==1)
861
862         tophys(r3,r4)                      // r3 <- PA
863 1:
864         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
865         l.movhi r5,hi(DTLB_TR_MASK)        // r5 <- ffff:0000.x000
866         l.ori   r5,r5,lo(DTLB_TR_MASK)     // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
867         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x330 - we have DTLBTR entry
868         l.mtspr r2,r5,SPR_DTLBTR_BASE(0)   // set DTLBTR
869
870         EXCEPTION_LOAD_GPR6
871         EXCEPTION_LOAD_GPR5
872         EXCEPTION_LOAD_GPR4
873         EXCEPTION_LOAD_GPR3
874         EXCEPTION_LOAD_GPR2
875
876         l.rfe                              // SR <- ESR, PC <- EPC
877
878 exit_with_no_dtranslation:
879         /* EA out of memory or not in supervisor mode */
880         EXCEPTION_LOAD_GPR6
881         EXCEPTION_LOAD_GPR4
882         l.j     _dispatch_bus_fault
883
884 /* ---[ boot itlb miss handler ]----------------------------------------- */
885
886 boot_itlb_miss_handler:
887
888 /* mask for ITLB_MR register: - sets V (valid) bit,
889  *                            - sets bits belonging to VPN (15-12)
890  */
891 #define ITLB_MR_MASK 0xfffff001
892
893 /* mask for ITLB_TR register: - sets A (access) bit,
894  *                            - sets SXE (superuser execute) bit
895  *                            - sets bits belonging to VPN (15-12)
896  */
897 #define ITLB_TR_MASK 0xfffff050
898
899 /*
900 #define VPN_MASK 0xffffe000
901 #define PPN_MASK 0xffffe000
902 */
903
904
905
906         EXCEPTION_STORE_GPR2
907         EXCEPTION_STORE_GPR3
908         EXCEPTION_STORE_GPR4
909         EXCEPTION_STORE_GPR5
910         EXCEPTION_STORE_GPR6
911
912 #if 0
913         l.mfspr r6,r0,SPR_ESR_BASE         //
914         l.andi  r6,r6,SPR_SR_SM            // are we in kernel mode ?
915         l.sfeqi r6,0                       // r6 == 0x1 --> SM
916         l.bf    exit_with_no_itranslation
917         l.nop
918 #endif
919
920
921         l.mfspr r4,r0,SPR_EEAR_BASE        // get the offending EA
922
923 earlyearly:
924         CLEAR_GPR(r6)
925
926         l.srli  r3,r4,0xd                  // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
927
928         l.mfspr r6, r0, SPR_IMMUCFGR
929         l.andi  r6, r6, SPR_IMMUCFGR_NTS
930         l.srli  r6, r6, SPR_IMMUCFGR_NTS_OFF
931         l.ori   r5, r0, 0x1
932         l.sll   r5, r5, r6      // r5 = number IMMU sets from IMMUCFGR
933         l.addi  r6, r5, -1      // r6 = nsets mask
934         l.and   r2, r3, r6      // r2 <- r3 % NSETS_MASK
935
936         l.or    r6,r6,r4                   // r6 <- r4
937         l.ori   r6,r6,~(VPN_MASK)          // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
938         l.movhi r5,hi(ITLB_MR_MASK)        // r5 <- ffff:0000.x000
939         l.ori   r5,r5,lo(ITLB_MR_MASK)     // r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
940         l.and   r5,r5,r6                   // r5 <- VPN :VPN .x001 - we have ITLBMR entry
941         l.mtspr r2,r5,SPR_ITLBMR_BASE(0)   // set ITLBMR
942
943         /*
944          * set up ITLB with no translation for EA <= 0x0fffffff
945          *
946          * we need this for head.S mapping (EA = PA). if we move all functions
947          * which run with mmu enabled into entry.S, we might be able to eliminate this.
948          *
949          */
950         LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
951         l.sfgeu  r6,r4                     // flag if r6 >= r4 (if 0xb0ffffff >= EA)
952         l.bf     1f                        // goto out
953         l.and    r3,r4,r4                  // delay slot :: 24 <- r4 (if flag==1)
954
955         tophys(r3,r4)                      // r3 <- PA
956 1:
957         l.ori   r3,r3,~(PPN_MASK)          // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
958         l.movhi r5,hi(ITLB_TR_MASK)        // r5 <- ffff:0000.x000
959         l.ori   r5,r5,lo(ITLB_TR_MASK)     // r5 <- ffff:1111.x050 - apply ITLB_MR_MASK
960         l.and   r5,r5,r3                   // r5 <- PPN :PPN .x050 - we have ITLBTR entry
961         l.mtspr r2,r5,SPR_ITLBTR_BASE(0)   // set ITLBTR
962
963         EXCEPTION_LOAD_GPR6
964         EXCEPTION_LOAD_GPR5
965         EXCEPTION_LOAD_GPR4
966         EXCEPTION_LOAD_GPR3
967         EXCEPTION_LOAD_GPR2
968
969         l.rfe                              // SR <- ESR, PC <- EPC
970
971 exit_with_no_itranslation:
972         EXCEPTION_LOAD_GPR4
973         EXCEPTION_LOAD_GPR6
974         l.j    _dispatch_bus_fault
975         l.nop
976
977 /* ====================================================================== */
978 /*
979  * Stuff below here shouldn't go into .head section... maybe this stuff
980  * can be moved to entry.S ???
981  */
982
983 /* ==============================================[ DTLB miss handler ]=== */
984
985 /*
986  * Comments:
987  *   Exception handlers are entered with MMU off so the following handler
988  *   needs to use physical addressing
989  *
990  */
991
992         .text
993 ENTRY(dtlb_miss_handler)
994         EXCEPTION_STORE_GPR2
995         EXCEPTION_STORE_GPR3
996         EXCEPTION_STORE_GPR4
997         /*
998          * get EA of the miss
999          */
1000         l.mfspr r2,r0,SPR_EEAR_BASE
1001         /*
1002          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1003          */
1004         GET_CURRENT_PGD(r3,r4)          // r3 is current_pgd, r4 is temp
1005         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1006         l.slli  r4,r4,0x2               // to get address << 2
1007         l.add   r3,r4,r3                // r4 is pgd_index(daddr)
1008         /*
1009          * if (pmd_none(*pmd))
1010          *   goto pmd_none:
1011          */
1012         tophys  (r4,r3)
1013         l.lwz   r3,0x0(r4)              // get *pmd value
1014         l.sfne  r3,r0
1015         l.bnf   d_pmd_none
1016          l.addi r3,r0,0xffffe000        // PAGE_MASK
1017
1018 d_pmd_good:
1019         /*
1020          * pte = *pte_offset(pmd, daddr);
1021          */
1022         l.lwz   r4,0x0(r4)              // get **pmd value
1023         l.and   r4,r4,r3                // & PAGE_MASK
1024         l.srli  r2,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1025         l.andi  r3,r2,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1026         l.slli  r3,r3,0x2               // to get address << 2
1027         l.add   r3,r3,r4
1028         l.lwz   r3,0x0(r3)              // this is pte at last
1029         /*
1030          * if (!pte_present(pte))
1031          */
1032         l.andi  r4,r3,0x1
1033         l.sfne  r4,r0                   // is pte present
1034         l.bnf   d_pte_not_present
1035         l.addi  r4,r0,0xffffe3fa        // PAGE_MASK | DTLB_UP_CONVERT_MASK
1036         /*
1037          * fill DTLB TR register
1038          */
1039         l.and   r4,r3,r4                // apply the mask
1040         // Determine number of DMMU sets
1041         l.mfspr r2, r0, SPR_DMMUCFGR
1042         l.andi  r2, r2, SPR_DMMUCFGR_NTS
1043         l.srli  r2, r2, SPR_DMMUCFGR_NTS_OFF
1044         l.ori   r3, r0, 0x1
1045         l.sll   r3, r3, r2      // r3 = number DMMU sets DMMUCFGR
1046         l.addi  r2, r3, -1      // r2 = nsets mask
1047         l.mfspr r3, r0, SPR_EEAR_BASE
1048         l.srli  r3, r3, 0xd     // >> PAGE_SHIFT
1049         l.and   r2, r3, r2      // calc offset:  & (NUM_TLB_ENTRIES-1)
1050                                                            //NUM_TLB_ENTRIES
1051         l.mtspr r2,r4,SPR_DTLBTR_BASE(0)
1052         /*
1053          * fill DTLB MR register
1054          */
1055         l.slli  r3, r3, 0xd             /* << PAGE_SHIFT => EA & PAGE_MASK */
1056         l.ori   r4,r3,0x1               // set hardware valid bit: DTBL_MR entry
1057         l.mtspr r2,r4,SPR_DTLBMR_BASE(0)
1058
1059         EXCEPTION_LOAD_GPR2
1060         EXCEPTION_LOAD_GPR3
1061         EXCEPTION_LOAD_GPR4
1062         l.rfe
1063 d_pmd_none:
1064 d_pte_not_present:
1065         EXCEPTION_LOAD_GPR2
1066         EXCEPTION_LOAD_GPR3
1067         EXCEPTION_LOAD_GPR4
1068         EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
1069
1070 /* ==============================================[ ITLB miss handler ]=== */
1071 ENTRY(itlb_miss_handler)
1072         EXCEPTION_STORE_GPR2
1073         EXCEPTION_STORE_GPR3
1074         EXCEPTION_STORE_GPR4
1075         /*
1076          * get EA of the miss
1077          */
1078         l.mfspr r2,r0,SPR_EEAR_BASE
1079
1080         /*
1081          * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
1082          *
1083          */
1084         GET_CURRENT_PGD(r3,r4)          // r3 is current_pgd, r5 is temp
1085         l.srli  r4,r2,0x18              // >> PAGE_SHIFT + (PAGE_SHIFT - 2)
1086         l.slli  r4,r4,0x2               // to get address << 2
1087         l.add   r3,r4,r3                // r4 is pgd_index(daddr)
1088         /*
1089          * if (pmd_none(*pmd))
1090          *   goto pmd_none:
1091          */
1092         tophys  (r4,r3)
1093         l.lwz   r3,0x0(r4)              // get *pmd value
1094         l.sfne  r3,r0
1095         l.bnf   i_pmd_none
1096          l.addi r3,r0,0xffffe000        // PAGE_MASK
1097
1098 i_pmd_good:
1099         /*
1100          * pte = *pte_offset(pmd, iaddr);
1101          *
1102          */
1103         l.lwz   r4,0x0(r4)              // get **pmd value
1104         l.and   r4,r4,r3                // & PAGE_MASK
1105         l.srli  r2,r2,0xd               // >> PAGE_SHIFT, r2 == EEAR
1106         l.andi  r3,r2,0x7ff             // (1UL << PAGE_SHIFT - 2) - 1
1107         l.slli  r3,r3,0x2               // to get address << 2
1108         l.add   r3,r3,r4
1109         l.lwz   r3,0x0(r3)              // this is pte at last
1110         /*
1111          * if (!pte_present(pte))
1112          *
1113          */
1114         l.andi  r4,r3,0x1
1115         l.sfne  r4,r0                   // is pte present
1116         l.bnf   i_pte_not_present
1117          l.addi r4,r0,0xffffe03a        // PAGE_MASK | ITLB_UP_CONVERT_MASK
1118         /*
1119          * fill ITLB TR register
1120          */
1121         l.and   r4,r3,r4                // apply the mask
1122         l.andi  r3,r3,0x7c0             // _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE |  _PAGE_URE | _PAGE_UWE
1123         l.sfeq  r3,r0
1124         l.bf    itlb_tr_fill //_workaround
1125         // Determine number of IMMU sets
1126         l.mfspr r2, r0, SPR_IMMUCFGR
1127         l.andi  r2, r2, SPR_IMMUCFGR_NTS
1128         l.srli  r2, r2, SPR_IMMUCFGR_NTS_OFF
1129         l.ori   r3, r0, 0x1
1130         l.sll   r3, r3, r2      // r3 = number IMMU sets IMMUCFGR
1131         l.addi  r2, r3, -1      // r2 = nsets mask
1132         l.mfspr r3, r0, SPR_EEAR_BASE
1133         l.srli  r3, r3, 0xd     // >> PAGE_SHIFT
1134         l.and   r2, r3, r2      // calc offset:  & (NUM_TLB_ENTRIES-1)
1135
1136 /*
1137  * __PHX__ :: fixme
1138  * we should not just blindly set executable flags,
1139  * but it does help with ping. the clean way would be to find out
1140  * (and fix it) why stack doesn't have execution permissions
1141  */
1142
1143 itlb_tr_fill_workaround:
1144         l.ori   r4,r4,0xc0              // | (SPR_ITLBTR_UXE | ITLBTR_SXE)
1145 itlb_tr_fill:
1146         l.mtspr r2,r4,SPR_ITLBTR_BASE(0)
1147         /*
1148          * fill DTLB MR register
1149          */
1150         l.slli  r3, r3, 0xd             /* << PAGE_SHIFT => EA & PAGE_MASK */
1151         l.ori   r4,r3,0x1               // set hardware valid bit: ITBL_MR entry
1152         l.mtspr r2,r4,SPR_ITLBMR_BASE(0)
1153
1154         EXCEPTION_LOAD_GPR2
1155         EXCEPTION_LOAD_GPR3
1156         EXCEPTION_LOAD_GPR4
1157         l.rfe
1158
1159 i_pmd_none:
1160 i_pte_not_present:
1161         EXCEPTION_LOAD_GPR2
1162         EXCEPTION_LOAD_GPR3
1163         EXCEPTION_LOAD_GPR4
1164         EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
1165
1166 /* ==============================================[ boot tlb handlers ]=== */
1167
1168
1169 /* =================================================[ debugging aids ]=== */
1170
1171         .align 64
1172 _immu_trampoline:
1173         .space 64
1174 _immu_trampoline_top:
1175
1176 #define TRAMP_SLOT_0            (0x0)
1177 #define TRAMP_SLOT_1            (0x4)
1178 #define TRAMP_SLOT_2            (0x8)
1179 #define TRAMP_SLOT_3            (0xc)
1180 #define TRAMP_SLOT_4            (0x10)
1181 #define TRAMP_SLOT_5            (0x14)
1182 #define TRAMP_FRAME_SIZE        (0x18)
1183
1184 ENTRY(_immu_trampoline_workaround)
1185         // r2 EEA
1186         // r6 is physical EEA
1187         tophys(r6,r2)
1188
1189         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1190         tophys  (r3,r5)                 // r3 is trampoline (physical)
1191
1192         LOAD_SYMBOL_2_GPR(r4,0x15000000)
1193         l.sw    TRAMP_SLOT_0(r3),r4
1194         l.sw    TRAMP_SLOT_1(r3),r4
1195         l.sw    TRAMP_SLOT_4(r3),r4
1196         l.sw    TRAMP_SLOT_5(r3),r4
1197
1198                                         // EPC = EEA - 0x4
1199         l.lwz   r4,0x0(r6)              // load op @ EEA + 0x0 (fc address)
1200         l.sw    TRAMP_SLOT_3(r3),r4     // store it to _immu_trampoline_data
1201         l.lwz   r4,-0x4(r6)             // load op @ EEA - 0x4 (f8 address)
1202         l.sw    TRAMP_SLOT_2(r3),r4     // store it to _immu_trampoline_data
1203
1204         l.srli  r5,r4,26                // check opcode for write access
1205         l.sfeqi r5,0                    // l.j
1206         l.bf    0f
1207         l.sfeqi r5,0x11                 // l.jr
1208         l.bf    1f
1209         l.sfeqi r5,1                    // l.jal
1210         l.bf    2f
1211         l.sfeqi r5,0x12                 // l.jalr
1212         l.bf    3f
1213         l.sfeqi r5,3                    // l.bnf
1214         l.bf    4f
1215         l.sfeqi r5,4                    // l.bf
1216         l.bf    5f
1217 99:
1218         l.nop
1219         l.j     99b                     // should never happen
1220         l.nop   1
1221
1222         // r2 is EEA
1223         // r3 is trampoline address (physical)
1224         // r4 is instruction
1225         // r6 is physical(EEA)
1226         //
1227         // r5
1228
1229 2:      // l.jal
1230
1231         /* 19 20 aa aa  l.movhi r9,0xaaaa
1232          * a9 29 bb bb  l.ori   r9,0xbbbb
1233          *
1234          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1235          */
1236
1237         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1238
1239                                         // l.movhi r9,0xaaaa
1240         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1241         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1242         l.srli  r5,r6,16
1243         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1244
1245                                         // l.ori   r9,0xbbbb
1246         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1247         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1248         l.andi  r5,r6,0xffff
1249         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1250
1251         /* falthrough, need to set up new jump offset */
1252
1253
1254 0:      // l.j
1255         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1256 //      l.srli  r6,r6,6                 // original offset shifted right 2
1257
1258         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1259 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1260
1261         l.addi  r5,r3,0xc               // new jump position (physical)
1262         l.slli  r5,r5,4                 // new jump position: shifted left 4
1263
1264         // calculate new jump offset
1265         // new_off = old_off + (old_jump - new_jump)
1266
1267         l.sub   r5,r4,r5                // old_jump - new_jump
1268         l.add   r5,r6,r5                // orig_off + (old_jump - new_jump)
1269         l.srli  r5,r5,6                 // new offset shifted right 2
1270
1271         // r5 is new jump offset
1272                                         // l.j has opcode 0x0...
1273         l.sw    TRAMP_SLOT_2(r3),r5     // write it back
1274
1275         l.j     trampoline_out
1276         l.nop
1277
1278 /* ----------------------------- */
1279
1280 3:      // l.jalr
1281
1282         /* 19 20 aa aa  l.movhi r9,0xaaaa
1283          * a9 29 bb bb  l.ori   r9,0xbbbb
1284          *
1285          * where 0xaaaabbbb is EEA + 0x4 shifted right 2
1286          */
1287
1288         l.addi  r6,r2,0x4               // this is 0xaaaabbbb
1289
1290                                         // l.movhi r9,0xaaaa
1291         l.ori   r5,r0,0x1920            // 0x1920 == l.movhi r9
1292         l.sh    (TRAMP_SLOT_0+0x0)(r3),r5
1293         l.srli  r5,r6,16
1294         l.sh    (TRAMP_SLOT_0+0x2)(r3),r5
1295
1296                                         // l.ori   r9,0xbbbb
1297         l.ori   r5,r0,0xa929            // 0xa929 == l.ori r9
1298         l.sh    (TRAMP_SLOT_1+0x0)(r3),r5
1299         l.andi  r5,r6,0xffff
1300         l.sh    (TRAMP_SLOT_1+0x2)(r3),r5
1301
1302         l.lhz   r5,(TRAMP_SLOT_2+0x0)(r3)       // load hi part of jump instruction
1303         l.andi  r5,r5,0x3ff             // clear out opcode part
1304         l.ori   r5,r5,0x4400            // opcode changed from l.jalr -> l.jr
1305         l.sh    (TRAMP_SLOT_2+0x0)(r3),r5 // write it back
1306
1307         /* falthrough */
1308
1309 1:      // l.jr
1310         l.j     trampoline_out
1311         l.nop
1312
1313 /* ----------------------------- */
1314
1315 4:      // l.bnf
1316 5:      // l.bf
1317         l.slli  r6,r4,6                 // original offset shifted left 6 - 2
1318 //      l.srli  r6,r6,6                 // original offset shifted right 2
1319
1320         l.slli  r4,r2,4                 // old jump position: EEA shifted left 4
1321 //      l.srli  r4,r4,6                 // old jump position: shifted right 2
1322
1323         l.addi  r5,r3,0xc               // new jump position (physical)
1324         l.slli  r5,r5,4                 // new jump position: shifted left 4
1325
1326         // calculate new jump offset
1327         // new_off = old_off + (old_jump - new_jump)
1328
1329         l.add   r6,r6,r4                // (orig_off + old_jump)
1330         l.sub   r6,r6,r5                // (orig_off + old_jump) - new_jump
1331         l.srli  r6,r6,6                 // new offset shifted right 2
1332
1333         // r6 is new jump offset
1334         l.lwz   r4,(TRAMP_SLOT_2+0x0)(r3)       // load jump instruction
1335         l.srli  r4,r4,16
1336         l.andi  r4,r4,0xfc00            // get opcode part
1337         l.slli  r4,r4,16
1338         l.or    r6,r4,r6                // l.b(n)f new offset
1339         l.sw    TRAMP_SLOT_2(r3),r6     // write it back
1340
1341         /* we need to add l.j to EEA + 0x8 */
1342         tophys  (r4,r2)                 // may not be needed (due to shifts down_
1343         l.addi  r4,r4,(0x8 - 0x8)       // jump target = r2 + 0x8 (compensate for 0x8)
1344                                         // jump position = r5 + 0x8 (0x8 compensated)
1345         l.sub   r4,r4,r5                // jump offset = target - new_position + 0x8
1346
1347         l.slli  r4,r4,4                 // the amount of info in imediate of jump
1348         l.srli  r4,r4,6                 // jump instruction with offset
1349         l.sw    TRAMP_SLOT_4(r3),r4     // write it to 4th slot
1350
1351         /* fallthrough */
1352
1353 trampoline_out:
1354         // set up new EPC to point to our trampoline code
1355         LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
1356         l.mtspr r0,r5,SPR_EPCR_BASE
1357
1358         // immu_trampoline is (4x) CACHE_LINE aligned
1359         // and only 6 instructions long,
1360         // so we need to invalidate only 2 lines
1361
1362         /* Establish cache block size
1363            If BS=0, 16;
1364            If BS=1, 32;
1365            r14 contain block size
1366         */
1367         l.mfspr r21,r0,SPR_ICCFGR
1368         l.andi  r21,r21,SPR_ICCFGR_CBS
1369         l.srli  r21,r21,7
1370         l.ori   r23,r0,16
1371         l.sll   r14,r23,r21
1372
1373         l.mtspr r0,r5,SPR_ICBIR
1374         l.add   r5,r5,r14
1375         l.mtspr r0,r5,SPR_ICBIR
1376
1377         l.jr    r9
1378         l.nop
1379
1380
1381 /*
1382  * DSCR: prints a string referenced by r3.
1383  *
1384  * PRMS: r3             - address of the first character of null
1385  *                      terminated string to be printed
1386  *
1387  * PREQ: UART at UART_BASE_ADD has to be initialized
1388  *
1389  * POST: caller should be aware that r3, r9 are changed
1390  */
1391 ENTRY(_emergency_print)
1392         EMERGENCY_PRINT_STORE_GPR4
1393         EMERGENCY_PRINT_STORE_GPR5
1394         EMERGENCY_PRINT_STORE_GPR6
1395         EMERGENCY_PRINT_STORE_GPR7
1396 2:
1397         l.lbz   r7,0(r3)
1398         l.sfeq  r7,r0
1399         l.bf    9f
1400         l.nop
1401
1402 // putc:
1403         l.movhi r4,hi(UART_BASE_ADD)
1404
1405         l.addi  r6,r0,0x20
1406 1:      l.lbz   r5,5(r4)
1407         l.andi  r5,r5,0x20
1408         l.sfeq  r5,r6
1409         l.bnf   1b
1410         l.nop
1411
1412         l.sb    0(r4),r7
1413
1414         l.addi  r6,r0,0x60
1415 1:      l.lbz   r5,5(r4)
1416         l.andi  r5,r5,0x60
1417         l.sfeq  r5,r6
1418         l.bnf   1b
1419         l.nop
1420
1421         /* next character */
1422         l.j     2b
1423         l.addi  r3,r3,0x1
1424
1425 9:
1426         EMERGENCY_PRINT_LOAD_GPR7
1427         EMERGENCY_PRINT_LOAD_GPR6
1428         EMERGENCY_PRINT_LOAD_GPR5
1429         EMERGENCY_PRINT_LOAD_GPR4
1430         l.jr    r9
1431         l.nop
1432
1433 ENTRY(_emergency_print_nr)
1434         EMERGENCY_PRINT_STORE_GPR4
1435         EMERGENCY_PRINT_STORE_GPR5
1436         EMERGENCY_PRINT_STORE_GPR6
1437         EMERGENCY_PRINT_STORE_GPR7
1438         EMERGENCY_PRINT_STORE_GPR8
1439
1440         l.addi  r8,r0,32                // shift register
1441
1442 1:      /* remove leading zeros */
1443         l.addi  r8,r8,-0x4
1444         l.srl   r7,r3,r8
1445         l.andi  r7,r7,0xf
1446
1447         /* don't skip the last zero if number == 0x0 */
1448         l.sfeqi r8,0x4
1449         l.bf    2f
1450         l.nop
1451
1452         l.sfeq  r7,r0
1453         l.bf    1b
1454         l.nop
1455
1456 2:
1457         l.srl   r7,r3,r8
1458
1459         l.andi  r7,r7,0xf
1460         l.sflts r8,r0
1461         l.bf    9f
1462
1463         l.sfgtui r7,0x9
1464         l.bnf   8f
1465         l.nop
1466         l.addi  r7,r7,0x27
1467
1468 8:
1469         l.addi  r7,r7,0x30
1470 // putc:
1471         l.movhi r4,hi(UART_BASE_ADD)
1472
1473         l.addi  r6,r0,0x20
1474 1:      l.lbz   r5,5(r4)
1475         l.andi  r5,r5,0x20
1476         l.sfeq  r5,r6
1477         l.bnf   1b
1478         l.nop
1479
1480         l.sb    0(r4),r7
1481
1482         l.addi  r6,r0,0x60
1483 1:      l.lbz   r5,5(r4)
1484         l.andi  r5,r5,0x60
1485         l.sfeq  r5,r6
1486         l.bnf   1b
1487         l.nop
1488
1489         /* next character */
1490         l.j     2b
1491         l.addi  r8,r8,-0x4
1492
1493 9:
1494         EMERGENCY_PRINT_LOAD_GPR8
1495         EMERGENCY_PRINT_LOAD_GPR7
1496         EMERGENCY_PRINT_LOAD_GPR6
1497         EMERGENCY_PRINT_LOAD_GPR5
1498         EMERGENCY_PRINT_LOAD_GPR4
1499         l.jr    r9
1500         l.nop
1501
1502
1503 /*
1504  * This should be used for debugging only.
1505  * It messes up the Linux early serial output
1506  * somehow, so use it sparingly and essentially
1507  * only if you need to debug something that goes wrong
1508  * before Linux gets the early serial going.
1509  *
1510  * Furthermore, you'll have to make sure you set the
1511  * UART_DEVISOR correctly according to the system
1512  * clock rate.
1513  *
1514  *
1515  */
1516
1517
1518
1519 #define SYS_CLK            20000000
1520 //#define SYS_CLK            1843200
1521 #define OR32_CONSOLE_BAUD  115200
1522 #define UART_DIVISOR       SYS_CLK/(16*OR32_CONSOLE_BAUD)
1523
1524 ENTRY(_early_uart_init)
1525         l.movhi r3,hi(UART_BASE_ADD)
1526
1527         l.addi  r4,r0,0x7
1528         l.sb    0x2(r3),r4
1529
1530         l.addi  r4,r0,0x0
1531         l.sb    0x1(r3),r4
1532
1533         l.addi  r4,r0,0x3
1534         l.sb    0x3(r3),r4
1535
1536         l.lbz   r5,3(r3)
1537         l.ori   r4,r5,0x80
1538         l.sb    0x3(r3),r4
1539         l.addi  r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
1540         l.sb    UART_DLM(r3),r4
1541         l.addi  r4,r0,((UART_DIVISOR) & 0x000000ff)
1542         l.sb    UART_DLL(r3),r4
1543         l.sb    0x3(r3),r5
1544
1545         l.jr    r9
1546         l.nop
1547
1548         .section .rodata
1549 _string_unhandled_exception:
1550         .string "\n\rRunarunaround: Unhandled exception 0x\0"
1551
1552 _string_epc_prefix:
1553         .string ": EPC=0x\0"
1554
1555 _string_nl:
1556         .string "\n\r\0"
1557
1558
1559 /* ========================================[ page aligned structures ]=== */
1560
1561 /*
1562  * .data section should be page aligned
1563  *      (look into arch/openrisc/kernel/vmlinux.lds.S)
1564  */
1565         .section .data,"aw"
1566         .align  8192
1567         .global  empty_zero_page
1568 empty_zero_page:
1569         .space  8192
1570
1571         .global  swapper_pg_dir
1572 swapper_pg_dir:
1573         .space  8192
1574
1575         .global _unhandled_stack
1576 _unhandled_stack:
1577         .space  8192
1578 _unhandled_stack_top:
1579
1580 /* ============================================================[ EOF ]=== */