/*
 * Machine check exception handling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>

#include <asm/machdep.h>
#include <asm/mce.h>

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structure.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}

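/*
 * Usage sketch (illustrative only, not part of the build): a platform
 * machine check handler decodes the hardware state into a struct
 * mce_error_info and hands it here. The enum values are the ones used
 * elsewhere in this file; any other details are assumptions:
 *
 *	struct mce_error_info mce_err = { 0 };
 *
 *	mce_err.error_type = MCE_ERROR_TYPE_SLB;
 *	mce_err.u.slb_error_type = MCE_SLB_ERROR_PARITY;
 *	mce_err.severity = MCE_SEV_ERROR_SYNC;
 *	mce_err.initiator = MCE_INITIATOR_CPU;
 *	save_mce_event(regs, handled, &mce_err, regs->nip, addr, ULONG_MAX);
 *
 * Passing ULONG_MAX as phys_addr marks the physical address as unknown,
 * matching the check above.
 */
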
/*
 * get_mce_event - Fetch the most recent event from the per-CPU buffer.
 * mce		Pointer to machine_check_event structure to be filled.
 * release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 * return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform specific machine check
 * handler routine and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}

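/*
 * Usage sketch (illustrative only): a consumer that wants to inspect
 * the event before freeing its slot can fetch and release in two
 * steps. MCE_EVENT_DONTRELEASE is the false value of the release flag:
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		// ... inspect evt ...
 *		release_mce_event();
 *	}
 */
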
static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

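/*
 * End-to-end flow (sketch of how the pieces in this file fit together):
 *
 *	save_mce_event()            -> per-cpu mce_event[]
 *	machine_check_queue_event() -> per-cpu mce_event_queue[]
 *	irq_work                    -> machine_check_process_queued_event()
 *
 * UE events additionally land in mce_ue_event_queue[] via
 * machine_check_ue_event() and are drained in process context by
 * machine_process_ue_event().
 */
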
/*
 * Process pending MCE events from the MCE UE event queue. This runs in
 * process context, from the work queue scheduled by
 * machine_check_ue_irq_work().
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but
		 * oh! well
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

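/*
 * Worked example for the pfn conversion above (illustrative numbers):
 * with 64K pages, PAGE_SHIFT is 16, so a reported physical address of
 * 0x2000000 becomes pfn 0x200 before being handed to memory_failure().
 */
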
/*
 * Process pending MCE events from the MCE event queue. This runs from
 * irq_work context, once it is safe to take interrupts again.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);
		machine_check_print_event_info(evt, false);
		__this_cpu_dec(mce_queue_count);
	}
}

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode)
{
	const char *level, *sevstr, *subtype;
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "";
		break;
	case MCE_SEV_ERROR_SYNC:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (user_mode) {
		printk("%s  NIP: [%016llx] PID: %d Comm: %s\n", level,
		       evt->srr0, current->pid, current->comm);
	} else {
		printk("%s  NIP [%016llx]: %pS\n", level, evt->srr0,
		       (void *)evt->srr0);
	}

	printk("%s  Initiator: %s\n", level,
	       evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		printk("%s  Error type: UE [%s]\n", level, subtype);
		if (evt->u.ue_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.ue_error.effective_address);
		if (evt->u.ue_error.physical_address_provided)
			printk("%s    Physical address:  %016llx\n",
			       level, evt->u.ue_error.physical_address);
		break;
	case MCE_ERROR_TYPE_SLB:
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		printk("%s  Error type: SLB [%s]\n", level, subtype);
		if (evt->u.slb_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.slb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_ERAT:
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		printk("%s  Error type: ERAT [%s]\n", level, subtype);
		if (evt->u.erat_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.erat_error.effective_address);
		break;
	case MCE_ERROR_TYPE_TLB:
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		printk("%s  Error type: TLB [%s]\n", level, subtype);
		if (evt->u.tlb_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.tlb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_USER:
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		printk("%s  Error type: User [%s]\n", level, subtype);
		if (evt->u.user_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.user_error.effective_address);
		break;
	case MCE_ERROR_TYPE_RA:
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		printk("%s  Error type: Real address [%s]\n", level, subtype);
		if (evt->u.ra_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.ra_error.effective_address);
		break;
	case MCE_ERROR_TYPE_LINK:
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		printk("%s  Error type: Link [%s]\n", level, subtype);
		if (evt->u.link_error.effective_address_provided)
			printk("%s    Effective address: %016llx\n",
			       level, evt->u.link_error.effective_address);
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		printk("%s  Error type: Unknown\n", level);
		break;
	}
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

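/*
 * Callers pick the second argument to choose the NIP report format
 * (PID/comm for user mode, symbol decode otherwise); a typical call
 * (sketch) would be:
 *
 *	machine_check_print_event_info(&evt, user_mode(regs));
 */
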
/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function =
					DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

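/*
 * Worked example for the PVR decode above (illustrative value, not
 * from real hardware): pvr = 0x004e1202 gives PVR_VER(pvr) ==
 * PVR_POWER9, (pvr & 0xe000) == 0 (i.e. Nimbus), and (pvr & 0xfff) ==
 * 0x202, i.e. DD2.2, so the debug trigger is assigned to TM suspend
 * escape.
 */
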
/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;
		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}