/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 *
 * I like traps on v9, :))))
 */
11 #include <linux/extable.h>
12 #include <linux/sched/mm.h>
13 #include <linux/sched/debug.h>
14 #include <linux/linkage.h>
15 #include <linux/kernel.h>
16 #include <linux/signal.h>
17 #include <linux/smp.h>
19 #include <linux/init.h>
20 #include <linux/kdebug.h>
21 #include <linux/ftrace.h>
22 #include <linux/reboot.h>
23 #include <linux/gfp.h>
24 #include <linux/context_tracking.h>
27 #include <asm/delay.h>
28 #include <asm/ptrace.h>
29 #include <asm/oplib.h>
31 #include <asm/pgtable.h>
32 #include <asm/unistd.h>
33 #include <linux/uaccess.h>
34 #include <asm/fpumacro.h>
37 #include <asm/estate.h>
38 #include <asm/chafsr.h>
39 #include <asm/sfafsr.h>
40 #include <asm/psrcompat.h>
41 #include <asm/processor.h>
42 #include <asm/timer.h>
45 #include <asm/memctrl.h>
46 #include <asm/cacheflush.h>
47 #include <asm/setup.h>
53 /* When an irrecoverable trap occurs at tl > 0, the trap entry
54 * code logs the trap state registers at every level in the trap
55 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
68 static void dump_tl1_traplog(struct tl1_traplog *p)
72 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
73 "dumping track stack.\n", p->tl);
75 limit = (tlb_type == hypervisor) ? 2 : 4;
76 for (i = 0; i < limit; i++) {
78 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
79 "TNPC[%016lx] TT[%lx]\n",
81 p->trapstack[i].tstate, p->trapstack[i].tpc,
82 p->trapstack[i].tnpc, p->trapstack[i].tt);
83 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
87 void bad_trap(struct pt_regs *regs, long lvl)
92 if (notify_die(DIE_TRAP, "bad trap", regs,
93 0, lvl, SIGTRAP) == NOTIFY_STOP)
97 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
98 die_if_kernel(buffer, regs);
102 if (regs->tstate & TSTATE_PRIV) {
103 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
104 die_if_kernel(buffer, regs);
106 if (test_thread_flag(TIF_32BIT)) {
107 regs->tpc &= 0xffffffff;
108 regs->tnpc &= 0xffffffff;
110 info.si_signo = SIGILL;
112 info.si_code = ILL_ILLTRP;
113 info.si_addr = (void __user *)regs->tpc;
114 info.si_trapno = lvl;
115 force_sig_info(SIGILL, &info, current);
118 void bad_trap_tl1(struct pt_regs *regs, long lvl)
122 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
123 0, lvl, SIGTRAP) == NOTIFY_STOP)
126 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
128 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
129 die_if_kernel (buffer, regs);
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() reporting: print the failing file/line before the
 * generic BUG machinery takes over.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
141 static DEFINE_SPINLOCK(dimm_handler_lock);
142 static dimm_printer_t dimm_handler;
144 static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
149 spin_lock_irqsave(&dimm_handler_lock, flags);
151 ret = dimm_handler(synd_code, paddr, buf, buflen);
152 } else if (tlb_type == spitfire) {
153 if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
159 spin_unlock_irqrestore(&dimm_handler_lock, flags);
164 int register_dimm_printer(dimm_printer_t func)
169 spin_lock_irqsave(&dimm_handler_lock, flags);
174 spin_unlock_irqrestore(&dimm_handler_lock, flags);
178 EXPORT_SYMBOL_GPL(register_dimm_printer);
180 void unregister_dimm_printer(dimm_printer_t func)
184 spin_lock_irqsave(&dimm_handler_lock, flags);
185 if (dimm_handler == func)
187 spin_unlock_irqrestore(&dimm_handler_lock, flags);
189 EXPORT_SYMBOL_GPL(unregister_dimm_printer);
191 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
193 enum ctx_state prev_state = exception_enter();
196 if (notify_die(DIE_TRAP, "instruction access exception", regs,
197 0, 0x8, SIGTRAP) == NOTIFY_STOP)
200 if (regs->tstate & TSTATE_PRIV) {
201 printk("spitfire_insn_access_exception: SFSR[%016lx] "
202 "SFAR[%016lx], going.\n", sfsr, sfar);
203 die_if_kernel("Iax", regs);
205 if (test_thread_flag(TIF_32BIT)) {
206 regs->tpc &= 0xffffffff;
207 regs->tnpc &= 0xffffffff;
209 info.si_signo = SIGSEGV;
211 info.si_code = SEGV_MAPERR;
212 info.si_addr = (void __user *)regs->tpc;
214 force_sig_info(SIGSEGV, &info, current);
216 exception_exit(prev_state);
219 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
221 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
222 0, 0x8, SIGTRAP) == NOTIFY_STOP)
225 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
226 spitfire_insn_access_exception(regs, sfsr, sfar);
229 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
231 unsigned short type = (type_ctx >> 16);
232 unsigned short ctx = (type_ctx & 0xffff);
235 if (notify_die(DIE_TRAP, "instruction access exception", regs,
236 0, 0x8, SIGTRAP) == NOTIFY_STOP)
239 if (regs->tstate & TSTATE_PRIV) {
240 printk("sun4v_insn_access_exception: ADDR[%016lx] "
241 "CTX[%04x] TYPE[%04x], going.\n",
243 die_if_kernel("Iax", regs);
246 if (test_thread_flag(TIF_32BIT)) {
247 regs->tpc &= 0xffffffff;
248 regs->tnpc &= 0xffffffff;
250 info.si_signo = SIGSEGV;
252 info.si_code = SEGV_MAPERR;
253 info.si_addr = (void __user *) addr;
255 force_sig_info(SIGSEGV, &info, current);
258 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
260 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
261 0, 0x8, SIGTRAP) == NOTIFY_STOP)
264 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
265 sun4v_insn_access_exception(regs, addr, type_ctx);
268 bool is_no_fault_exception(struct pt_regs *regs)
273 if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT)
277 * Must do a little instruction decoding here in order to
278 * decide on a course of action. The bits of interest are:
279 * insn[31:30] = op, where 3 indicates the load/store group
280 * insn[24:19] = op3, which identifies individual opcodes
281 * insn[13] indicates an immediate offset
282 * op3[4]=1 identifies alternate space instructions
283 * op3[5:4]=3 identifies floating point instructions
284 * op3[2]=1 identifies stores
285 * See "Opcode Maps" in the appendix of any Sparc V9
286 * architecture spec for full details.
288 if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */
289 if (insn & 0x2000) /* immediate offset */
290 asi = (regs->tstate >> 24); /* saved %asi */
292 asi = (insn >> 5); /* immediate asi */
293 if ((asi & 0xf6) == ASI_PNF) {
294 if (insn & 0x200000) /* op3[2], stores */
296 if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
297 handle_ldf_stq(insn, regs);
299 handle_ld_nf(insn, regs);
306 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
308 enum ctx_state prev_state = exception_enter();
311 if (notify_die(DIE_TRAP, "data access exception", regs,
312 0, 0x30, SIGTRAP) == NOTIFY_STOP)
315 if (regs->tstate & TSTATE_PRIV) {
316 /* Test if this comes from uaccess places. */
317 const struct exception_table_entry *entry;
319 entry = search_exception_tables(regs->tpc);
321 /* Ouch, somebody is trying VM hole tricks on us... */
322 #ifdef DEBUG_EXCEPTIONS
323 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
324 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
325 regs->tpc, entry->fixup);
327 regs->tpc = entry->fixup;
328 regs->tnpc = regs->tpc + 4;
332 printk("spitfire_data_access_exception: SFSR[%016lx] "
333 "SFAR[%016lx], going.\n", sfsr, sfar);
334 die_if_kernel("Dax", regs);
337 if (is_no_fault_exception(regs))
340 info.si_signo = SIGSEGV;
342 info.si_code = SEGV_MAPERR;
343 info.si_addr = (void __user *)sfar;
345 force_sig_info(SIGSEGV, &info, current);
347 exception_exit(prev_state);
350 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
352 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
353 0, 0x30, SIGTRAP) == NOTIFY_STOP)
356 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
357 spitfire_data_access_exception(regs, sfsr, sfar);
360 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
362 unsigned short type = (type_ctx >> 16);
363 unsigned short ctx = (type_ctx & 0xffff);
366 if (notify_die(DIE_TRAP, "data access exception", regs,
367 0, 0x8, SIGTRAP) == NOTIFY_STOP)
370 if (regs->tstate & TSTATE_PRIV) {
371 /* Test if this comes from uaccess places. */
372 const struct exception_table_entry *entry;
374 entry = search_exception_tables(regs->tpc);
376 /* Ouch, somebody is trying VM hole tricks on us... */
377 #ifdef DEBUG_EXCEPTIONS
378 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
379 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
380 regs->tpc, entry->fixup);
382 regs->tpc = entry->fixup;
383 regs->tnpc = regs->tpc + 4;
386 printk("sun4v_data_access_exception: ADDR[%016lx] "
387 "CTX[%04x] TYPE[%04x], going.\n",
389 die_if_kernel("Dax", regs);
392 if (test_thread_flag(TIF_32BIT)) {
393 regs->tpc &= 0xffffffff;
394 regs->tnpc &= 0xffffffff;
396 if (is_no_fault_exception(regs))
399 info.si_signo = SIGSEGV;
401 info.si_code = SEGV_MAPERR;
402 info.si_addr = (void __user *) addr;
404 force_sig_info(SIGSEGV, &info, current);
407 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
409 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
410 0, 0x8, SIGTRAP) == NOTIFY_STOP)
413 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
414 sun4v_data_access_exception(regs, addr, type_ctx);
418 #include "pci_impl.h"
421 /* When access exceptions happen, we must do this. */
422 static void spitfire_clean_and_reenable_l1_caches(void)
426 if (tlb_type != spitfire)
430 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
431 spitfire_put_icache_tag(va, 0x0);
432 spitfire_put_dcache_tag(va, 0x0);
435 /* Re-enable in LSU. */
436 __asm__ __volatile__("flush %%g6\n\t"
438 "stxa %0, [%%g0] %1\n\t"
441 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
442 LSU_CONTROL_IM | LSU_CONTROL_DM),
443 "i" (ASI_LSU_CONTROL)
447 static void spitfire_enable_estate_errors(void)
449 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
452 : "r" (ESTATE_ERR_ALL),
453 "i" (ASI_ESTATE_ERROR_EN));
/* Map a UDB ECC syndrome byte to a syndrome code for the memory
 * controller's DIMM lookup (values >= 0x40 are non-single-bit /
 * special codes).
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

/* Fallback DIMM label when no translation is available. */
static char *syndrome_unknown = "<Unknown>";
493 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
495 unsigned short scode;
496 char memmod_str[64], *p;
499 scode = ecc_syndrome_table[udbl & 0xff];
500 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
501 p = syndrome_unknown;
504 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
505 "Memory Module \"%s\"\n",
506 smp_processor_id(), scode, p);
510 scode = ecc_syndrome_table[udbh & 0xff];
511 if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
512 p = syndrome_unknown;
515 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
516 "Memory Module \"%s\"\n",
517 smp_processor_id(), scode, p);
522 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
525 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
526 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
527 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
529 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
531 /* We always log it, even if someone is listening for this
534 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
535 0, TRAP_TYPE_CEE, SIGTRAP);
537 /* The Correctable ECC Error trap does not disable I/D caches. So
538 * we only have to restore the ESTATE Error Enable register.
540 spitfire_enable_estate_errors();
543 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
547 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
548 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
549 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
551 /* XXX add more human friendly logging of the error status
552 * XXX as is implemented for cheetah
555 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
557 /* We always log it, even if someone is listening for this
560 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
563 if (regs->tstate & TSTATE_PRIV) {
565 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
566 die_if_kernel("UE", regs);
569 /* XXX need more intelligent processing here, such as is implemented
570 * XXX for cheetah errors, in fact if the E-cache still holds the
571 * XXX line with bad parity this will loop
574 spitfire_clean_and_reenable_l1_caches();
575 spitfire_enable_estate_errors();
577 if (test_thread_flag(TIF_32BIT)) {
578 regs->tpc &= 0xffffffff;
579 regs->tnpc &= 0xffffffff;
581 info.si_signo = SIGBUS;
583 info.si_code = BUS_OBJERR;
584 info.si_addr = (void *)0;
586 force_sig_info(SIGBUS, &info, current);
589 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
591 unsigned long afsr, tt, udbh, udbl;
594 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
595 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
596 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
597 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
598 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
601 if (tt == TRAP_TYPE_DAE &&
602 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
603 spitfire_clean_and_reenable_l1_caches();
604 spitfire_enable_estate_errors();
606 pci_poke_faulted = 1;
607 regs->tnpc = regs->tpc + 4;
612 if (afsr & SFAFSR_UE)
613 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
615 if (tt == TRAP_TYPE_CEE) {
616 /* Handle the case where we took a CEE trap, but ACK'd
617 * only the UE state in the UDB error registers.
619 if (afsr & SFAFSR_UE) {
620 if (udbh & UDBE_CE) {
621 __asm__ __volatile__(
622 "stxa %0, [%1] %2\n\t"
625 : "r" (udbh & UDBE_CE),
626 "r" (0x0), "i" (ASI_UDB_ERROR_W));
628 if (udbl & UDBE_CE) {
629 __asm__ __volatile__(
630 "stxa %0, [%1] %2\n\t"
633 : "r" (udbl & UDBE_CE),
634 "r" (0x18), "i" (ASI_UDB_ERROR_W));
638 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
642 int cheetah_pcache_forced_on;
644 void cheetah_enable_pcache(void)
648 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
651 __asm__ __volatile__("ldxa [%%g0] %1, %0"
653 : "i" (ASI_DCU_CONTROL_REG));
654 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
655 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
658 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
/* Cheetah error trap handling. */

/* Parameters of the E-cache displacement-flush region, computed once
 * at boot by cheetah_ecache_flush_init().
 */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying the error */
	const char *name;	/* human readable description */
};
675 static const char CHAFSR_PERR_msg[] =
676 "System interface protocol error";
677 static const char CHAFSR_IERR_msg[] =
678 "Internal processor error";
679 static const char CHAFSR_ISAP_msg[] =
680 "System request parity error on incoming address";
681 static const char CHAFSR_UCU_msg[] =
682 "Uncorrectable E-cache ECC error for ifetch/data";
683 static const char CHAFSR_UCC_msg[] =
684 "SW Correctable E-cache ECC error for ifetch/data";
685 static const char CHAFSR_UE_msg[] =
686 "Uncorrectable system bus data ECC error for read";
687 static const char CHAFSR_EDU_msg[] =
688 "Uncorrectable E-cache ECC error for stmerge/blkld";
689 static const char CHAFSR_EMU_msg[] =
690 "Uncorrectable system bus MTAG error";
691 static const char CHAFSR_WDU_msg[] =
692 "Uncorrectable E-cache ECC error for writeback";
693 static const char CHAFSR_CPU_msg[] =
694 "Uncorrectable ECC error for copyout";
695 static const char CHAFSR_CE_msg[] =
696 "HW corrected system bus data ECC error for read";
697 static const char CHAFSR_EDC_msg[] =
698 "HW corrected E-cache ECC error for stmerge/blkld";
699 static const char CHAFSR_EMC_msg[] =
700 "HW corrected system bus MTAG ECC error";
701 static const char CHAFSR_WDC_msg[] =
702 "HW corrected E-cache ECC error for writeback";
703 static const char CHAFSR_CPC_msg[] =
704 "HW corrected ECC error for copyout";
705 static const char CHAFSR_TO_msg[] =
706 "Unmapped error from system bus";
707 static const char CHAFSR_BERR_msg[] =
708 "Bus error response from system bus";
709 static const char CHAFSR_IVC_msg[] =
710 "HW corrected system bus data ECC error for ivec read";
711 static const char CHAFSR_IVU_msg[] =
712 "Uncorrectable system bus data ECC error for ivec read";
713 static struct afsr_error_table __cheetah_error_table[] = {
714 { CHAFSR_PERR, CHAFSR_PERR_msg },
715 { CHAFSR_IERR, CHAFSR_IERR_msg },
716 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
717 { CHAFSR_UCU, CHAFSR_UCU_msg },
718 { CHAFSR_UCC, CHAFSR_UCC_msg },
719 { CHAFSR_UE, CHAFSR_UE_msg },
720 { CHAFSR_EDU, CHAFSR_EDU_msg },
721 { CHAFSR_EMU, CHAFSR_EMU_msg },
722 { CHAFSR_WDU, CHAFSR_WDU_msg },
723 { CHAFSR_CPU, CHAFSR_CPU_msg },
724 { CHAFSR_CE, CHAFSR_CE_msg },
725 { CHAFSR_EDC, CHAFSR_EDC_msg },
726 { CHAFSR_EMC, CHAFSR_EMC_msg },
727 { CHAFSR_WDC, CHAFSR_WDC_msg },
728 { CHAFSR_CPC, CHAFSR_CPC_msg },
729 { CHAFSR_TO, CHAFSR_TO_msg },
730 { CHAFSR_BERR, CHAFSR_BERR_msg },
731 /* These two do not update the AFAR. */
732 { CHAFSR_IVC, CHAFSR_IVC_msg },
733 { CHAFSR_IVU, CHAFSR_IVU_msg },
736 static const char CHPAFSR_DTO_msg[] =
737 "System bus unmapped error for prefetch/storequeue-read";
738 static const char CHPAFSR_DBERR_msg[] =
739 "System bus error for prefetch/storequeue-read";
740 static const char CHPAFSR_THCE_msg[] =
741 "Hardware corrected E-cache Tag ECC error";
742 static const char CHPAFSR_TSCE_msg[] =
743 "SW handled correctable E-cache Tag ECC error";
744 static const char CHPAFSR_TUE_msg[] =
745 "Uncorrectable E-cache Tag ECC error";
746 static const char CHPAFSR_DUE_msg[] =
747 "System bus uncorrectable data ECC error due to prefetch/store-fill";
748 static struct afsr_error_table __cheetah_plus_error_table[] = {
749 { CHAFSR_PERR, CHAFSR_PERR_msg },
750 { CHAFSR_IERR, CHAFSR_IERR_msg },
751 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
752 { CHAFSR_UCU, CHAFSR_UCU_msg },
753 { CHAFSR_UCC, CHAFSR_UCC_msg },
754 { CHAFSR_UE, CHAFSR_UE_msg },
755 { CHAFSR_EDU, CHAFSR_EDU_msg },
756 { CHAFSR_EMU, CHAFSR_EMU_msg },
757 { CHAFSR_WDU, CHAFSR_WDU_msg },
758 { CHAFSR_CPU, CHAFSR_CPU_msg },
759 { CHAFSR_CE, CHAFSR_CE_msg },
760 { CHAFSR_EDC, CHAFSR_EDC_msg },
761 { CHAFSR_EMC, CHAFSR_EMC_msg },
762 { CHAFSR_WDC, CHAFSR_WDC_msg },
763 { CHAFSR_CPC, CHAFSR_CPC_msg },
764 { CHAFSR_TO, CHAFSR_TO_msg },
765 { CHAFSR_BERR, CHAFSR_BERR_msg },
766 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
767 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
768 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
769 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
770 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
771 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
772 /* These two do not update the AFAR. */
773 { CHAFSR_IVC, CHAFSR_IVC_msg },
774 { CHAFSR_IVU, CHAFSR_IVU_msg },
777 static const char JPAFSR_JETO_msg[] =
778 "System interface protocol error, hw timeout caused";
779 static const char JPAFSR_SCE_msg[] =
780 "Parity error on system snoop results";
781 static const char JPAFSR_JEIC_msg[] =
782 "System interface protocol error, illegal command detected";
783 static const char JPAFSR_JEIT_msg[] =
784 "System interface protocol error, illegal ADTYPE detected";
785 static const char JPAFSR_OM_msg[] =
786 "Out of range memory error has occurred";
787 static const char JPAFSR_ETP_msg[] =
788 "Parity error on L2 cache tag SRAM";
789 static const char JPAFSR_UMS_msg[] =
790 "Error due to unsupported store";
791 static const char JPAFSR_RUE_msg[] =
792 "Uncorrectable ECC error from remote cache/memory";
793 static const char JPAFSR_RCE_msg[] =
794 "Correctable ECC error from remote cache/memory";
795 static const char JPAFSR_BP_msg[] =
796 "JBUS parity error on returned read data";
797 static const char JPAFSR_WBP_msg[] =
798 "JBUS parity error on data for writeback or block store";
799 static const char JPAFSR_FRC_msg[] =
800 "Foreign read to DRAM incurring correctable ECC error";
801 static const char JPAFSR_FRU_msg[] =
802 "Foreign read to DRAM incurring uncorrectable ECC error";
803 static struct afsr_error_table __jalapeno_error_table[] = {
804 { JPAFSR_JETO, JPAFSR_JETO_msg },
805 { JPAFSR_SCE, JPAFSR_SCE_msg },
806 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
807 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
808 { CHAFSR_PERR, CHAFSR_PERR_msg },
809 { CHAFSR_IERR, CHAFSR_IERR_msg },
810 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
811 { CHAFSR_UCU, CHAFSR_UCU_msg },
812 { CHAFSR_UCC, CHAFSR_UCC_msg },
813 { CHAFSR_UE, CHAFSR_UE_msg },
814 { CHAFSR_EDU, CHAFSR_EDU_msg },
815 { JPAFSR_OM, JPAFSR_OM_msg },
816 { CHAFSR_WDU, CHAFSR_WDU_msg },
817 { CHAFSR_CPU, CHAFSR_CPU_msg },
818 { CHAFSR_CE, CHAFSR_CE_msg },
819 { CHAFSR_EDC, CHAFSR_EDC_msg },
820 { JPAFSR_ETP, JPAFSR_ETP_msg },
821 { CHAFSR_WDC, CHAFSR_WDC_msg },
822 { CHAFSR_CPC, CHAFSR_CPC_msg },
823 { CHAFSR_TO, CHAFSR_TO_msg },
824 { CHAFSR_BERR, CHAFSR_BERR_msg },
825 { JPAFSR_UMS, JPAFSR_UMS_msg },
826 { JPAFSR_RUE, JPAFSR_RUE_msg },
827 { JPAFSR_RCE, JPAFSR_RCE_msg },
828 { JPAFSR_BP, JPAFSR_BP_msg },
829 { JPAFSR_WBP, JPAFSR_WBP_msg },
830 { JPAFSR_FRC, JPAFSR_FRC_msg },
831 { JPAFSR_FRU, JPAFSR_FRU_msg },
832 /* These two do not update the AFAR. */
833 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Selected at boot (cheetah_ecache_flush_init()) according to the CPU
 * implementation; cheetah_error_log is the per-cpu error scoreboard
 * (two slots per cpu: TL0 and TL1).
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

struct cheetah_err_info *cheetah_error_log;
841 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
843 struct cheetah_err_info *p;
844 int cpu = smp_processor_id();
846 if (!cheetah_error_log)
849 p = cheetah_error_log + (cpu * 2);
850 if ((afsr & CHAFSR_TL1) != 0UL)
/* Trap-table slots and replacement trap vectors (assembler-defined
 * elsewhere); cheetah_ecache_flush_init() memcpy()s the vectors over
 * the slots at boot.
 */
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
868 void __init cheetah_ecache_flush_init(void)
870 unsigned long largest_size, smallest_linesize, order, ver;
873 /* Scan all cpu device tree nodes, note two values:
874 * 1) largest E-cache size
875 * 2) smallest E-cache line size
878 smallest_linesize = ~0UL;
880 for (i = 0; i < NR_CPUS; i++) {
883 val = cpu_data(i).ecache_size;
887 if (val > largest_size)
890 val = cpu_data(i).ecache_line_size;
891 if (val < smallest_linesize)
892 smallest_linesize = val;
896 if (largest_size == 0UL || smallest_linesize == ~0UL) {
897 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
902 ecache_flush_size = (2 * largest_size);
903 ecache_flush_linesize = smallest_linesize;
905 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
907 if (ecache_flush_physbase == ~0UL) {
908 prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
909 "contiguous physical memory.\n",
914 /* Now allocate error trap reporting scoreboard. */
915 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
916 for (order = 0; order < MAX_ORDER; order++) {
917 if ((PAGE_SIZE << order) >= sz)
920 cheetah_error_log = (struct cheetah_err_info *)
921 __get_free_pages(GFP_KERNEL, order);
922 if (!cheetah_error_log) {
923 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
924 "error logging scoreboard (%d bytes).\n", sz);
927 memset(cheetah_error_log, 0, PAGE_SIZE << order);
929 /* Mark all AFSRs as invalid so that the trap handler will
930 * log new new information there.
932 for (i = 0; i < 2 * NR_CPUS; i++)
933 cheetah_error_log[i].afsr = CHAFSR_INVALID;
935 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
936 if ((ver >> 32) == __JALAPENO_ID ||
937 (ver >> 32) == __SERRANO_ID) {
938 cheetah_error_table = &__jalapeno_error_table[0];
939 cheetah_afsr_errors = JPAFSR_ERRORS;
940 } else if ((ver >> 32) == 0x003e0015) {
941 cheetah_error_table = &__cheetah_plus_error_table[0];
942 cheetah_afsr_errors = CHPAFSR_ERRORS;
944 cheetah_error_table = &__cheetah_error_table[0];
945 cheetah_afsr_errors = CHAFSR_ERRORS;
948 /* Now patch trap tables. */
949 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
950 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
951 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
952 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
953 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
954 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
955 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
956 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
957 if (tlb_type == cheetah_plus) {
958 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
959 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
960 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
961 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
966 static void cheetah_flush_ecache(void)
968 unsigned long flush_base = ecache_flush_physbase;
969 unsigned long flush_linesize = ecache_flush_linesize;
970 unsigned long flush_size = ecache_flush_size;
972 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
973 " bne,pt %%xcc, 1b\n\t"
974 " ldxa [%2 + %0] %3, %%g0\n\t"
976 : "0" (flush_size), "r" (flush_base),
977 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
980 static void cheetah_flush_ecache_line(unsigned long physaddr)
984 physaddr &= ~(8UL - 1UL);
985 physaddr = (ecache_flush_physbase +
986 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
987 alias = physaddr + (ecache_flush_size >> 1UL);
988 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
989 "ldxa [%1] %2, %%g0\n\t"
992 : "r" (physaddr), "r" (alias),
993 "i" (ASI_PHYS_USE_EC));
996 /* Unfortunately, the diagnostic access to the I-cache tags we need to
997 * use to clear the thing interferes with I-cache coherency transactions.
999 * So we must only flush the I-cache when it is disabled.
1001 static void __cheetah_flush_icache(void)
1003 unsigned int icache_size, icache_line_size;
1006 icache_size = local_cpu_data().icache_size;
1007 icache_line_size = local_cpu_data().icache_line_size;
1009 /* Clear the valid bits in all the tags. */
1010 for (addr = 0; addr < icache_size; addr += icache_line_size) {
1011 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1014 : "r" (addr | (2 << 3)),
1019 static void cheetah_flush_icache(void)
1021 unsigned long dcu_save;
1023 /* Save current DCU, disable I-cache. */
1024 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1025 "or %0, %2, %%g1\n\t"
1026 "stxa %%g1, [%%g0] %1\n\t"
1029 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
1032 __cheetah_flush_icache();
1034 /* Restore DCU register */
1035 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1038 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
1041 static void cheetah_flush_dcache(void)
1043 unsigned int dcache_size, dcache_line_size;
1046 dcache_size = local_cpu_data().dcache_size;
1047 dcache_line_size = local_cpu_data().dcache_line_size;
1049 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1050 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1053 : "r" (addr), "i" (ASI_DCACHE_TAG));
1057 /* In order to make the even parity correct we must do two things.
1058 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1059 * Next, we clear out all 32-bytes of data for that line. Data of
1060 * all-zero + tag parity value of zero == correct parity.
1062 static void cheetah_plus_zap_dcache_parity(void)
1064 unsigned int dcache_size, dcache_line_size;
1067 dcache_size = local_cpu_data().dcache_size;
1068 dcache_line_size = local_cpu_data().dcache_line_size;
1070 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1071 unsigned long tag = (addr >> 14);
1074 __asm__ __volatile__("membar #Sync\n\t"
1075 "stxa %0, [%1] %2\n\t"
1078 : "r" (tag), "r" (addr),
1079 "i" (ASI_DCACHE_UTAG));
1080 for (line = addr; line < addr + dcache_line_size; line += 8)
1081 __asm__ __volatile__("membar #Sync\n\t"
1082 "stxa %%g0, [%0] %1\n\t"
1086 "i" (ASI_DCACHE_DATA));
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
/* Return the highest priority error condition mentioned.
 *
 * First-match wins, so cheetah_error_table[] is presumably ordered
 * from highest to lowest priority (table not visible here — confirm).
 */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
	unsigned long tmp = 0;

	/* Walk the table until its terminating zero mask. */
	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Map a single AFSR error bit to its printable name from
 * cheetah_error_table[]; used by the error-logging code below.
 */
static const char *cheetah_get_string(unsigned long bit)
	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
/* Dump a Cheetah error report to the console.
 *
 * @regs:        trap-time registers
 * @info:        logged snapshot of D-cache/I-cache/E-cache state
 * @afsr/@afar:  fault status / fault address from the trap handler
 * @recoverable: non-zero prints at KERN_WARNING; zero prints at
 *               KERN_CRIT and appends a "not recoverable" notice
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
	unsigned long hipri;

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
		      CHAFSR_CPC | CHAFSR_CPU | \
		      CHAFSR_UE | CHAFSR_CE | \
		      CHAFSR_EDC | CHAFSR_EDU | \
		      CHAFSR_UCC | CHAFSR_UCU | \
		      CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		/* E-cache ECC syndrome -> DIMM name via cheetah_ecc_syntab[]. */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		/* M-tag syndrome -> DIMM name via cheetah_mtag_syntab[]. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       smp_processor_id(), unum);

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit, highest priority first. */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read AFSR (and, when error bits are set, AFAR), then write the
 * observed bits back to AFSR to acknowledge them.  Callers treat a
 * non-zero return as "new errors were latched"; when @logp is
 * non-NULL the fresh values are presumably stored into it — the
 * store lines are not visible in this chunk, confirm against source.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
	unsigned long afsr, afar;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
	if ((afsr & cheetah_afsr_errors) != 0) {
		__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
	/* Writing the error bits back clears them in AFSR. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));
/* Handle a Cheetah Fast-ECC error trap: snapshot the per-cpu error
 * log, flush and re-enable the caches and error reporting, then
 * either return (recoverable) or panic.  The trap handler left the
 * caches disabled; this function must restore them.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
	struct cheetah_err_info local_snapshot, *p;

	/* Flush E-cache first so the error state is pushed to memory. */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))

	/* Re-check AFSR/AFAR. What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))

	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
/* Try to fix a correctable error by pushing the line out from
 * the E-cache. Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * @physaddr: physical address of the word that took the CE.
 * Returns an int status; the return statements are not visible in
 * this chunk — from the recheck logic it appears to distinguish
 * "error persists" from "intermittent" (confirm against source).
 */
static int cheetah_fix_ce(unsigned long physaddr)
	unsigned long orig_estate;
	unsigned long alias1, alias2;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache. Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))

	/* No new error, intermittent problem. */

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
/* Return non-zero if PADDR is a valid physical memory address.
 *
 * Translates to a kernel linear-map virtual address; addresses past
 * high_memory fail early (the early-return line is not visible in
 * this chunk), otherwise validity is delegated to kern_addr_valid().
 */
static int cheetah_check_main_memory(unsigned long paddr)
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
	return kern_addr_valid(vaddr);
/* Handle a Cheetah correctable-ECC (CEE) error trap: snapshot the
 * log entry, attempt to scrub the bad line when it is in main memory,
 * flush/re-enable caches and CE reporting, then log.  Panics only in
 * the irrecoverable case.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);
	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			/* Flush only the line unless EDC is the sole error. */
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     : "i" (ASI_DCU_CONTROL_REG),
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

		panic("Irrecoverable Correctable-ECC error trap.\n");
/* Handle a Cheetah deferred (asynchronous) error trap.  A special
 * case recognizes the PCI config-space "poke" probe sequence and
 * merely flags it.  Otherwise: snapshot the log entry, flush and
 * re-enable caches and error reporting, log, and then either fix up
 * via the exception table, kill the user task, or panic.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)

		(void) cheetah_recheck_errors(NULL);

		/* Tell the poker its access faulted and skip the insn. */
		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;

	p = cheetah_get_error_log(afsr);
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			/* Flush only the line unless EDU is the sole error. */
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)

			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))

	/* Re-check AFSR/AFAR. What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))

	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again. This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
				/* OK, kernel access to userspace. */
				/* BAD, privileged state is corrupted. */

		/* Pin the bad page so it is never handed out again. */
		if (pfn_valid(afar >> PAGE_SHIFT))
			get_page(pfn_to_page(afar >> PAGE_SHIFT));

			/* Only perform fixup if we still have a
			 * recoverable condition.
			 */
				regs->tpc = entry->fixup;
				regs->tnpc = regs->tpc + 4;

		panic("Irrecoverable deferred error trap.\n");
/* Handle a D/I cache parity error trap. TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
		__cheetah_flush_icache();
		/* Zap the parity-bad D-cache state, then flush it. */
		cheetah_plus_zap_dcache_parity();
		cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)

		/* Unrecoverable (bit1 set): report and panic. */
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       (type & 0x1) ? 'I' : 'D',
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");

	/* Recoverable: just warn. */
	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       (type & 0x1) ? 'I' : 'D',
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
/* Layout of a sun4v error report as delivered by the hypervisor on
 * the resumable/non-resumable error queues.  The /*0xNN*-style
 * comments give each field's byte offset within the 64-byte entry.
 */
struct sun4v_error_entry {
	/* Unique error handle */
/*0x00*/u64 err_handle;

	/* %stick value at the time of the error */
/*0x08*/u64 err_stick;

/*0x10*/u8 reserved_1[3];

	/* Error type, one of SUN4V_ERR_TYPE_* below */
/*0x13*/u8 err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
#define SUN4V_ERR_TYPE_DUMP_CORE	5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
#define SUN4V_ERR_TYPE_NUM		7

	/* Error attributes */
/*0x14*/u32 err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
#define SUN4V_ERR_ATTRS_ASR		0x00000040
#define SUN4V_ERR_ATTRS_ASI		0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT	24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

#define SUN4V_ERR_SPSTATE_FAULTED	0
#define SUN4V_ERR_SPSTATE_AVAILABLE	1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2

#define SUN4V_ERR_MODE_USER		1
#define SUN4V_ERR_MODE_PRIV		2

	/* Real address of the memory region or PIO transaction */
/*0x18*/u64 err_raddr;

	/* Size of the operation triggering the error, in bytes */
/*0x20*/u32 err_size;

	/* ID of the CPU involved in the error */
/*0x24*/u16 err_cpu;

	/* Grace period for shutdown, in seconds */
/*0x26*/u16 err_secs;

	/* Value of the %asi register */
/*0x29*/u8 reserved_2;

	/* Value of the ASR register number */
/*0x2a*/u16 err_asr;
#define SUN4V_ERR_ASR_VALID		0x8000

/*0x2c*/u32 reserved_3;
/*0x30*/u64 reserved_4;
/*0x38*/u64 reserved_5;
/* Queue-overflow counters bumped by the overflow trap handlers below;
 * reported and reset by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
/* Translate a SUN4V_ERR_TYPE_* code into a printable string.
 * Out-of-range values are handled by the (not visible here) fallback
 * branch after the range check.
 */
static const char *sun4v_err_type_to_str(u8 type)
	static const char *types[SUN4V_ERR_TYPE_NUM] = {
		"uncorrected resumable",
		"precise nonresumable",
		"deferred nonresumable",

	if (type < SUN4V_ERR_TYPE_NUM)
/* Print a human-readable decode of a sun4v err_attrs word, continuing
 * the current console line (pr_cont): one name per set attribute bit,
 * the SP state, the mode, and the queue-full flag.
 */
static void sun4v_emit_err_attr_strings(u32 attrs)
	static const char *attr_names[] = {
	static const char *sp_states[] = {
		"sp-state-reserved",
	static const char *modes[] = {

	/* One name per low-order attribute bit that is set. */
	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		if (attrs & (1U << i)) {
			const char *s = attr_names[i];

	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
	pr_cont("%s ", sp_states[sp_state]);

	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
		SUN4V_ERR_ATTRS_MODE_SHFT);
	pr_cont("%s ", modes[mode]);

	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
		pr_cont("res-queue-full ");
/* When the report contains a real-address of "-1" it means that the
 * hardware did not provide the address. So we compute the effective
 * address of the load or store instruction at regs->tpc and report
 * that. Usually when this happens it's a PIO and in such a case we
 * are using physical addresses with bypass ASIs anyways, so what we
 * report here is exactly what we want.
 */
static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
	/* Only attempt this for kernel-mode traps; the kernel-text
	 * dereference of regs->tpc below would be unsafe otherwise.
	 */
	if (!(regs->tstate & TSTATE_PRIV))

	insn = *(unsigned int *) regs->tpc;

	addr = compute_effective_address(regs, insn, 0);

	printk("%s: insn effective address [0x%016llx]\n",
/* Print a full sun4v error report to the console.
 *
 * @regs: trap-time registers
 * @ent:  local copy of the queue entry
 * @cpu:  cpu the report was delivered on
 * @pfx:  log-level + tag prefix for every line
 * @ocnt: matching overflow counter; reported and reset if non-zero
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
			    int cpu, const char *pfx, atomic_t *ocnt)
	u64 *raw_ptr = (u64 *) ent;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: TPC [0x%016lx] <%pS>\n",
	       pfx, regs->tpc, (void *) regs->tpc);

	/* Raw 64-byte entry, dumped as eight u64s over two lines. */
	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);

	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
	       pfx, ent->err_handle, ent->err_stick);

	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));

	attrs = ent->err_attrs;
	printk("%s: attrs [0x%08x] < ", pfx, attrs);
	sun4v_emit_err_attr_strings(attrs);

	/* Various fields in the error report are only valid if
	 * certain attribute bits are set.
	 */
	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
		     SUN4V_ERR_ATTRS_PIO |
		     SUN4V_ERR_ATTRS_ASI)) {
		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);

		/* ~0 means "address not provided"; derive it from TPC. */
		if (ent->err_raddr == ~(u64)0)
			sun4v_report_real_raddr(pfx, regs);

	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
		printk("%s: size [0x%x]\n", pfx, ent->err_size);

	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
		     SUN4V_ERR_ATTRS_INT_REGISTERS |
		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
		     SUN4V_ERR_ATTRS_PRIV_REG))
		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);

	if (attrs & SUN4V_ERR_ATTRS_ASI)
		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);

	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
		printk("%s: reg [0x%04x]\n",
		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);

	/* Report and reset any queue overflows seen since last time. */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		printk("%s: Queue overflowed %d times.\n",
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 *
 * @offset: byte offset of the entry within this cpu's resumable
 *          error queue kernel buffer.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
	enum ctx_state prev_state = exception_enter();
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;

	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
		/* We should really take the seconds field of
		 * the error report and use it for the shutdown
		 * invocation, but for now do the same thing we
		 * do for a DS shutdown request.
		 */
		pr_info("Shutdown request, %u seconds...\n",
			local_copy.err_secs);
		orderly_poweroff(true);

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);

	exception_exit(prev_state);
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
	atomic_inc(&sun4v_resum_oflow_cnt);
/* Given a set of registers, get the virtual address that was being
 * accessed by the faulting instruction at tpc.
 */
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
	/* Fetch the user instruction word at TPC, then decode its
	 * effective address (rd field in bits 29:25).
	 */
	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
		return compute_effective_address(regs, insn,
						 (insn >> 25) & 0x1f);
2113 /* Attempt to handle non-resumable errors generated from userspace.
2114 * Returns true if the signal was handled, false otherwise.
2116 bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
2117 struct sun4v_error_entry *ent) {
2119 unsigned int attrs = ent->err_attrs;
2121 if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
2122 unsigned long addr = ent->err_raddr;
2125 if (addr == ~(u64)0) {
2126 /* This seems highly unlikely to ever occur */
2127 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
2129 unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
2132 /* Break the unfortunate news. */
2133 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
2135 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
2138 while (page_cnt-- > 0) {
2139 if (pfn_valid(addr >> PAGE_SHIFT))
2140 get_page(pfn_to_page(addr >> PAGE_SHIFT));
2144 info.si_signo = SIGKILL;
2147 force_sig_info(info.si_signo, &info, current);
2151 if (attrs & SUN4V_ERR_ATTRS_PIO) {
2154 info.si_signo = SIGBUS;
2155 info.si_code = BUS_ADRERR;
2156 info.si_addr = (void __user *)sun4v_get_vaddr(regs);
2157 force_sig_info(info.si_signo, &info, current);
2162 /* Default to doing nothing */
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 *
 * Exceptions: user-mode errors that sun4v_nonresum_error_user_handled()
 * converts to signals, and the PCI poke probe sequence, both return
 * instead of panicking.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;

	if (!(regs->tstate & TSTATE_PRIV) &&
	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
		/* DON'T PANIC: This userspace error was handled. */

	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
	/* XXX Actually even this can make not that much sense. Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
/* Common tail for the ITLB/DTLB error reporters below: die if the
 * fault was taken in kernel mode.
 */
static void sun4v_tlb_error(struct pt_regs *regs)
	die_if_kernel("TLB/TSB error", regs);
/* ITLB error state stashed by the low-level trap entry code. */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Report a sun4v ITLB error: dump the TL>0 trap log, the trap-time
 * registers, and the stashed TLB state, then die if in kernel mode.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	sun4v_tlb_error(regs);
/* DTLB error state stashed by the low-level trap entry code. */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Report a sun4v DTLB error: dump the TL>0 trap log, the trap-time
 * registers, and the stashed TLB state, then die if in kernel mode.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	sun4v_tlb_error(regs);
/* Log a failed hypervisor TLB operation (called from assembly). */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
/* As above, but for TLB ops issued via cross-call. */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
/* Common FPU-exception tail: in kernel mode just skip the faulting
 * instruction; in user mode decode the %fsr current-exception bits
 * into an si_code and deliver SIGFPE.
 */
static void do_fpe_common(struct pt_regs *regs)
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		unsigned long fsr = current_thread_info()->xfsr[0];

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *)regs->tpc;
		info.si_code = FPE_FIXME;
		/* ftt == 1 (IEEE_754_exception): decode cexc bits. */
		if ((fsr & 0x1c000) == (1 << 14)) {
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		force_sig_info(SIGFPE, &info, current);
/* Trap 0x24: IEEE FPU exception.  Notify debuggers, then defer to
 * the common SIGFPE path.
 */
void do_fpieee(struct pt_regs *regs)
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)

	do_fpe_common(regs);

	exception_exit(prev_state);
/* Trap 0x25: non-IEEE FPU exception.  Unfinished/unimplemented FPops
 * are emulated in software (do_mathemu); everything else falls
 * through to the common SIGFPE path.
 */
void do_fpother(struct pt_regs *regs)
	enum ctx_state prev_state = exception_enter();
	struct fpustate *f = FPUSTATE;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)

	/* Dispatch on the %fsr ftt (trap type) field. */
	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f, false);

	do_fpe_common(regs);

	exception_exit(prev_state);
/* Trap 0x26: tagged arithmetic overflow.  Kernel mode dies; user
 * mode gets SIGEMT/EMT_TAGOVF at the faulting PC.
 */
void do_tof(struct pt_regs *regs)
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	info.si_signo = SIGEMT;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	force_sig_info(SIGEMT, &info, current);

	exception_exit(prev_state);
/* Trap 0x28: integer divide-by-zero.  Fatal in kernel mode; user mode
 * gets SIGFPE with si_code FPE_INTDIV.
 */
2383 void do_div0(struct pt_regs *regs)
2385 	enum ctx_state prev_state = exception_enter();
2388 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2389 		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2392 	if (regs->tstate & TSTATE_PRIV)
2393 		die_if_kernel("TL0: Kernel divide by zero.", regs);
	/* 32-bit task: truncate PCs to the low 32 bits. */
2394 	if (test_thread_flag(TIF_32BIT)) {
2395 		regs->tpc &= 0xffffffff;
2396 		regs->tnpc &= 0xffffffff;
2398 	info.si_signo = SIGFPE;
2400 	info.si_code = FPE_INTDIV;
2401 	info.si_addr = (void __user *)regs->tpc;
2403 	force_sig_info(SIGFPE, &info, current);
2405 	exception_exit(prev_state);
/* Dump 9 instruction words around a kernel PC (3 before, the faulting
 * one bracketed with '<' '>', 5 after).  Bails out if pc is not
 * word-aligned.
 */
2408 static void instruction_dump(unsigned int *pc)
2412 	if ((((unsigned long) pc) & 3))
2415 	printk("Instruction DUMP:");
2416 	for (i = -3; i < 6; i++)
2417 		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* Dump 9 instruction words around a user PC, copied safely from
 * userspace with copy_from_user().  buf[3] (index i==3) corresponds to
 * pc itself and is the bracketed entry.  Bails on misalignment or a
 * failed copy.
 */
2421 static void user_instruction_dump(unsigned int __user *pc)
2424 	unsigned int buf[9];
2426 	if ((((unsigned long) pc) & 3))
2429 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2432 	printk("Instruction DUMP:");
2433 	for (i = 0; i < 9; i++)
2434 		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Print a call trace for @tsk starting from stack pointer @_ksp.
 * Walks save-area frames (sparc_stackf), recognizing embedded trap
 * frames via kstack_is_trap_frame() and stopping at userland frames or
 * invalid stack addresses; limited to 16 entries.  With the function
 * graph tracer enabled, return_to_handler trampolines are resolved back
 * to the real return address from tsk->ret_stack.
 */
2438 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2440 	unsigned long fp, ksp;
2441 	struct thread_info *tp;
2443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2447 	ksp = (unsigned long) _ksp;
2450 		tp = task_thread_info(tsk);
	/* No ksp given for the current task: read the live frame pointer. */
2453 		asm("mov %%fp, %0" : "=r" (ksp));
2457 	if (tp == current_thread_info())
	/* Kernel sp is stored biased; undo STACK_BIAS to get the frame. */
2460 	fp = ksp + STACK_BIAS;
2462 	printk("Call Trace:\n");
2464 		struct sparc_stackf *sf;
2465 		struct pt_regs *regs;
2468 		if (!kstack_valid(tp, fp))
2470 		sf = (struct sparc_stackf *) fp;
2471 		regs = (struct pt_regs *) (sf + 1);
2473 		if (kstack_is_trap_frame(tp, regs)) {
			/* Stop the walk once a user-mode trap frame is hit. */
2474 			if (!(regs->tstate & TSTATE_PRIV))
2477 			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2479 			pc = sf->callers_pc;
2480 			fp = (unsigned long)sf->fp + STACK_BIAS;
2483 		printk(" [%016lx] %pS\n", pc, (void *) pc);
2484 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/* pc+8 matching return_to_handler means this slot was
		 * patched by the graph tracer; recover the original
		 * return address.
		 */
2485 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
2486 			int index = tsk->curr_ret_stack;
2487 			if (tsk->ret_stack && index >= graph) {
2488 				pc = tsk->ret_stack[index - graph].ret;
2489 				printk(" [%016lx] %pS\n", pc, (void *) pc);
2494 	} while (++count < 16);
/* Follow the saved frame pointer (%i6) of @rw to the caller's register
 * window, re-applying STACK_BIAS.  NOTE(review): the validity check
 * between these lines is elided in this view.
 */
2497 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2499 	unsigned long fp = rw->ins[6];
2504 	return (struct reg_window *) (fp + STACK_BIAS);
/* Kernel oops path: print the banner and register/backtrace state, taint
 * the kernel, and terminate.  For a privileged trap the caller chain is
 * walked via kernel_stack_up() and the faulting kernel instructions are
 * dumped; for a user trap the user instructions are dumped instead.
 * Ends in panic() / do_exit (tail elided in this view) — never returns.
 */
2507 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
2509 	static int die_counter;
2512 	/* Amuse the user. */
2515 "              \"@'/ .. \\`@\"\n"
2519 	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2520 	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Flush all register windows to the stack so the backtrace below
	 * sees current values.
	 */
2521 	__asm__ __volatile__("flushw");
2523 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
2524 	if (regs->tstate & TSTATE_PRIV) {
2525 		struct thread_info *tp = current_thread_info();
2526 		struct reg_window *rw = (struct reg_window *)
2527 			(regs->u_regs[UREG_FP] + STACK_BIAS);
2529 		/* Stop the back trace when we hit userland or we
2530 		 * find some badly aligned kernel stack.
2534 		       kstack_valid(tp, (unsigned long) rw)) {
2535 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
2536 			       (void *) rw->ins[7]);
2538 			rw = kernel_stack_up(rw);
2540 		instruction_dump ((unsigned int *) regs->tpc);
2542 		if (test_thread_flag(TIF_32BIT)) {
2543 			regs->tpc &= 0xffffffff;
2544 			regs->tnpc &= 0xffffffff;
2546 		user_instruction_dump ((unsigned int __user *) regs->tpc);
2549 		panic("Fatal exception");
2550 	if (regs->tstate & TSTATE_PRIV)
2554 EXPORT_SYMBOL(die_if_kernel);
/* Opcode mask/value pair identifying VIS instructions that may need
 * software emulation (see vis_emul() below).
 */
2556 #define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2557 #define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))

/* Trap 0x10: illegal instruction.  Fatal in kernel mode.  For user mode,
 * attempts software emulation of POPC, LDQ/STQ, and (on sun4v) VIS and
 * unimplemented FPU instructions before falling back to SIGILL.
 */
2559 void do_illegal_instruction(struct pt_regs *regs)
2561 	enum ctx_state prev_state = exception_enter();
2562 	unsigned long pc = regs->tpc;
2563 	unsigned long tstate = regs->tstate;
2567 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2568 		       0, 0x10, SIGILL) == NOTIFY_STOP)
2571 	if (tstate & TSTATE_PRIV)
2572 		die_if_kernel("Kernel illegal instruction", regs);
2573 	if (test_thread_flag(TIF_32BIT))
	/* Fetch the faulting instruction word from user space. */
2575 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2576 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2577 			if (handle_popc(insn, regs))
2579 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2580 			if (handle_ldf_stq(insn, regs))
2582 		} else if (tlb_type == hypervisor) {
2583 			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2584 				if (!vis_emul(regs, insn))
2587 				struct fpustate *f = FPUSTATE;
2589 				/* On UltraSPARC T2 and later, FPU insns which
2590 				 * are not implemented in HW signal an illegal
2591 				 * instruction trap and do not set the FP Trap
2592 				 * Trap in the %fsr to unimplemented_FPop.
2594 				if (do_mathemu(regs, f, true))
	/* No emulation applied: deliver SIGILL / ILL_ILLOPC. */
2599 	info.si_signo = SIGILL;
2601 	info.si_code = ILL_ILLOPC;
2602 	info.si_addr = (void __user *)pc;
2604 	force_sig_info(SIGILL, &info, current);
2606 	exception_exit(prev_state);
/* Trap 0x34: memory address unaligned (sun4u, with sfar/sfsr from the
 * fault status registers).  Kernel-mode accesses are fixed up by
 * kernel_unaligned_trap(); user mode gets SIGBUS / BUS_ADRALN at the
 * faulting address unless a no-fault load exception applies.
 */
2609 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2611 	enum ctx_state prev_state = exception_enter();
2614 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2615 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2618 	if (regs->tstate & TSTATE_PRIV) {
		/* Emulate the unaligned kernel access in software. */
2619 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2622 	if (is_no_fault_exception(regs))
2625 	info.si_signo = SIGBUS;
2627 	info.si_code = BUS_ADRALN;
2628 	info.si_addr = (void __user *)sfar;
2630 	force_sig_info(SIGBUS, &info, current);
2632 	exception_exit(prev_state);
/* sun4v variant of the unaligned-access trap: same policy as
 * mem_address_unaligned() but takes the hypervisor-reported fault
 * address and type/context instead of sfar/sfsr.
 */
2635 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2639 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2640 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2643 	if (regs->tstate & TSTATE_PRIV) {
2644 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2647 	if (is_no_fault_exception(regs))
2650 	info.si_signo = SIGBUS;
2652 	info.si_code = BUS_ADRALN;
2653 	info.si_addr = (void __user *) addr;
2655 	force_sig_info(SIGBUS, &info, current);
/* Trap 0x11: privileged opcode executed in user mode.  Delivers
 * SIGILL / ILL_PRVOPC at the faulting PC.
 */
2658 void do_privop(struct pt_regs *regs)
2660 	enum ctx_state prev_state = exception_enter();
2663 	if (notify_die(DIE_TRAP, "privileged operation", regs,
2664 		       0, 0x11, SIGILL) == NOTIFY_STOP)
	/* 32-bit task: truncate PCs to the low 32 bits. */
2667 	if (test_thread_flag(TIF_32BIT)) {
2668 		regs->tpc &= 0xffffffff;
2669 		regs->tnpc &= 0xffffffff;
2671 	info.si_signo = SIGILL;
2673 	info.si_code = ILL_PRVOPC;
2674 	info.si_addr = (void __user *)regs->tpc;
2676 	force_sig_info(SIGILL, &info, current);
2678 	exception_exit(prev_state);
/* Privileged-action trap handler.  NOTE(review): body entirely elided
 * in this view — see full file.
 */
2681 void do_privact(struct pt_regs *regs)
2686 /* Trap level 1 stuff or other traps we should never see... */
/* Each handler below is boilerplate: the TL1 variants dump the trap
 * stack logged at (pt_regs + 1) by the trap entry code (see
 * dump_tl1_traplog()), then all of them die via die_if_kernel() — these
 * traps are never expected in normal operation.
 */
2687 void do_cee(struct pt_regs *regs)
2690 	die_if_kernel("TL0: Cache Error Exception", regs);
2693 void do_div0_tl1(struct pt_regs *regs)
2696 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2697 	die_if_kernel("TL1: DIV0 Exception", regs);
2700 void do_fpieee_tl1(struct pt_regs *regs)
2703 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2704 	die_if_kernel("TL1: FPU IEEE Exception", regs);
2707 void do_fpother_tl1(struct pt_regs *regs)
2710 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2711 	die_if_kernel("TL1: FPU Other Exception", regs);
2714 void do_ill_tl1(struct pt_regs *regs)
2717 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2718 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
2721 void do_irq_tl1(struct pt_regs *regs)
2724 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2725 	die_if_kernel("TL1: IRQ Exception", regs);
2728 void do_lddfmna_tl1(struct pt_regs *regs)
2731 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2732 	die_if_kernel("TL1: LDDF Exception", regs);
2735 void do_stdfmna_tl1(struct pt_regs *regs)
2738 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2739 	die_if_kernel("TL1: STDF Exception", regs);
2742 void do_paw(struct pt_regs *regs)
2745 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2748 void do_paw_tl1(struct pt_regs *regs)
2751 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2752 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2755 void do_vaw(struct pt_regs *regs)
2758 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2761 void do_vaw_tl1(struct pt_regs *regs)
2764 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2765 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2768 void do_tof_tl1(struct pt_regs *regs)
2771 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2772 	die_if_kernel("TL1: Tag Overflow Exception", regs);
/* SunOS compat: return a v8-style %psr synthesized from the v9 %tstate
 * in %o0 (UREG_I0), then advance past the trap instruction.
 */
2775 void do_getpsr(struct pt_regs *regs)
2777 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2778 	regs->tpc = regs->tnpc;
2780 	if (test_thread_flag(TIF_32BIT)) {
2781 		regs->tpc &= 0xffffffff;
2782 		regs->tnpc &= 0xffffffff;
/* Per-cpu cpu-mondo counter and trap-handling state, indexed by cpu id;
 * trap_block is referenced from assembly (exported for modules too).
 */
2786 u64 cpu_mondo_counter[NR_CPUS] = {0};
2787 struct trap_per_cpu trap_block[NR_CPUS];
2788 EXPORT_SYMBOL(trap_block);
2790 /* This can get invoked before sched_init() so play it super safe
2791  * and use hard_smp_processor_id().
 *
 * Binds this cpu's trap_block entry to its thread_info @t.  notrace:
 * runs too early for ftrace instrumentation.
 */
2793 void notrace init_cur_cpu_trap(struct thread_info *t)
2795 	int cpu = hard_smp_processor_id();
2796 	struct trap_per_cpu *p = &trap_block[cpu];
/* Link-time error hooks historically used when the offset sanity checks
 * below failed (now enforced via BUILD_BUG_ON at compile time).
 */
2802 extern void thread_info_offsets_are_bolixed_dave(void);
2803 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2804 extern void tsb_config_offsets_are_bolixed_dave(void);

2806 /* Only invoked on boot processor. */
/* Verifies at compile time that the TI_*/TRAP_PER_CPU_*/TSB_CONFIG_*
 * assembly offset constants match the actual C struct layouts (trap
 * entry assembly depends on them), then attaches the boot cpu to
 * init_mm.
 */
2807 void __init trap_init(void)
2809 	/* Compile time sanity check. */
2810 	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
2811 		     TI_FLAGS != offsetof(struct thread_info, flags) ||
2812 		     TI_CPU != offsetof(struct thread_info, cpu) ||
2813 		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2814 		     TI_KSP != offsetof(struct thread_info, ksp) ||
2815 		     TI_FAULT_ADDR != offsetof(struct thread_info,
2817 		     TI_KREGS != offsetof(struct thread_info, kregs) ||
2818 		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2819 		     TI_REG_WINDOW != offsetof(struct thread_info,
2821 		     TI_RWIN_SPTRS != offsetof(struct thread_info,
2823 		     TI_GSR != offsetof(struct thread_info, gsr) ||
2824 		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
2825 		     TI_PRE_COUNT != offsetof(struct thread_info,
2827 		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2828 		     TI_CURRENT_DS != offsetof(struct thread_info,
2830 		     TI_KUNA_REGS != offsetof(struct thread_info,
2832 		     TI_KUNA_INSN != offsetof(struct thread_info,
2834 		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
		     /* fpregs must be 64-byte aligned for block stores. */
2835 		     (TI_FPREGS & (64 - 1)));
2837 	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
2839 		     (TRAP_PER_CPU_PGD_PADDR !=
2840 		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
2841 		     (TRAP_PER_CPU_CPU_MONDO_PA !=
2842 		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2843 		     (TRAP_PER_CPU_DEV_MONDO_PA !=
2844 		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2845 		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
2846 		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2847 		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
2848 		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2849 		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2850 		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2851 		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2852 		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2853 		     (TRAP_PER_CPU_FAULT_INFO !=
2854 		      offsetof(struct trap_per_cpu, fault_info)) ||
2855 		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2856 		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2857 		     (TRAP_PER_CPU_CPU_LIST_PA !=
2858 		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2859 		     (TRAP_PER_CPU_TSB_HUGE !=
2860 		      offsetof(struct trap_per_cpu, tsb_huge)) ||
2861 		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2862 		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2863 		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2864 		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2865 		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2866 		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2867 		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2868 		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2869 		     (TRAP_PER_CPU_RESUM_QMASK !=
2870 		      offsetof(struct trap_per_cpu, resum_qmask)) ||
2871 		     (TRAP_PER_CPU_NONRESUM_QMASK !=
2872 		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
2873 		     (TRAP_PER_CPU_PER_CPU_BASE !=
2874 		      offsetof(struct trap_per_cpu, __per_cpu_base)));
2876 	BUILD_BUG_ON((TSB_CONFIG_TSB !=
2877 		      offsetof(struct tsb_config, tsb)) ||
2878 		     (TSB_CONFIG_RSS_LIMIT !=
2879 		      offsetof(struct tsb_config, tsb_rss_limit)) ||
2880 		     (TSB_CONFIG_NENTRIES !=
2881 		      offsetof(struct tsb_config, tsb_nentries)) ||
2882 		     (TSB_CONFIG_REG_VAL !=
2883 		      offsetof(struct tsb_config, tsb_reg_val)) ||
2884 		     (TSB_CONFIG_MAP_VADDR !=
2885 		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
2886 		     (TSB_CONFIG_MAP_PTE !=
2887 		      offsetof(struct tsb_config, tsb_map_pte)));
2889 	/* Attach to the address space of init_task.  On SMP we
2890 	 * do this in smp.c:smp_callin for other cpus.
2893 	current->active_mm = &init_mm;