/*
 * Machine check exception handling CPU-side for power7 and power8
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt) "mce_power: " fmt

#include <linux/types.h>
#include <linux/ptrace.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
36 * Convert an address related to an mm to a PFN. NOTE: we are in real
37 * mode, we could potentially race with page table updates.
39 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
43 unsigned long pfn, flags;
51 local_irq_save(flags);
52 ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
54 if (!ptep || pte_special(*ptep)) {
59 if (shift <= PAGE_SHIFT)
62 unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
63 pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
67 local_irq_restore(flags);
/* flush SLBs and reload */
#ifdef CONFIG_PPC_BOOK3S_64
static void flush_and_reload_slb(void)
{
	/* Invalidate all SLBs */
	slb_flush_all_realmode();

#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/*
	 * If machine check is hit when in guest or in transition, we will
	 * only flush the SLBs and continue.
	 */
	if (get_paca()->kvm_hstate.in_guest)
		return;
#endif
	/* Radix MMU has no SLB entries to reload. */
	if (early_radix_enabled())
		return;

	/*
	 * This probably shouldn't happen, but it may be possible it's
	 * called in early boot before SLB shadows are allocated.
	 */
	if (!get_slb_shadow())
		return;

	slb_restore_bolted_realmode();
}
#endif
100 static void flush_erat(void)
102 #ifdef CONFIG_PPC_BOOK3S_64
103 if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
104 flush_and_reload_slb();
108 /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */
109 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
/* Selector values for mce_flush(): which translation cache to flush. */
#define MCE_FLUSH_SLB 1
#define MCE_FLUSH_TLB 2
#define MCE_FLUSH_ERAT 3
116 static int mce_flush(int what)
118 #ifdef CONFIG_PPC_BOOK3S_64
119 if (what == MCE_FLUSH_SLB) {
120 flush_and_reload_slb();
124 if (what == MCE_FLUSH_ERAT) {
128 if (what == MCE_FLUSH_TLB) {
136 #define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
138 struct mce_ierror_table {
139 unsigned long srr1_mask;
140 unsigned long srr1_value;
141 bool nip_valid; /* nip is a valid indicator of faulting address */
142 unsigned int error_type;
143 unsigned int error_subtype;
144 unsigned int initiator;
145 unsigned int severity;
148 static const struct mce_ierror_table mce_p7_ierror_table[] = {
149 { 0x00000000001c0000, 0x0000000000040000, true,
150 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
151 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
152 { 0x00000000001c0000, 0x0000000000080000, true,
153 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
154 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
155 { 0x00000000001c0000, 0x00000000000c0000, true,
156 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
157 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
158 { 0x00000000001c0000, 0x0000000000100000, true,
159 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
160 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
161 { 0x00000000001c0000, 0x0000000000140000, true,
162 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
163 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
164 { 0x00000000001c0000, 0x0000000000180000, true,
165 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
166 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
167 { 0x00000000001c0000, 0x00000000001c0000, true,
168 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
169 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
170 { 0, 0, 0, 0, 0, 0 } };
172 static const struct mce_ierror_table mce_p8_ierror_table[] = {
173 { 0x00000000081c0000, 0x0000000000040000, true,
174 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
175 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
176 { 0x00000000081c0000, 0x0000000000080000, true,
177 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
178 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
179 { 0x00000000081c0000, 0x00000000000c0000, true,
180 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
181 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
182 { 0x00000000081c0000, 0x0000000000100000, true,
183 MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
184 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
185 { 0x00000000081c0000, 0x0000000000140000, true,
186 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
187 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
188 { 0x00000000081c0000, 0x0000000000180000, true,
189 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
190 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
191 { 0x00000000081c0000, 0x00000000001c0000, true,
192 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
193 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
194 { 0x00000000081c0000, 0x0000000008000000, true,
195 MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
196 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
197 { 0x00000000081c0000, 0x0000000008040000, true,
198 MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
199 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
200 { 0, 0, 0, 0, 0, 0 } };
202 static const struct mce_ierror_table mce_p9_ierror_table[] = {
203 { 0x00000000081c0000, 0x0000000000040000, true,
204 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
205 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
206 { 0x00000000081c0000, 0x0000000000080000, true,
207 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
208 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
209 { 0x00000000081c0000, 0x00000000000c0000, true,
210 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
211 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
212 { 0x00000000081c0000, 0x0000000000100000, true,
213 MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
214 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
215 { 0x00000000081c0000, 0x0000000000140000, true,
216 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
217 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
218 { 0x00000000081c0000, 0x0000000000180000, true,
219 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
220 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
221 { 0x00000000081c0000, 0x00000000001c0000, true,
222 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN,
223 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
224 { 0x00000000081c0000, 0x0000000008000000, true,
225 MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
226 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
227 { 0x00000000081c0000, 0x0000000008040000, true,
228 MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
229 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
230 { 0x00000000081c0000, 0x00000000080c0000, true,
231 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH,
232 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
233 { 0x00000000081c0000, 0x0000000008100000, true,
234 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
235 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
236 { 0x00000000081c0000, 0x0000000008140000, false,
237 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE,
238 MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
239 { 0x00000000081c0000, 0x0000000008180000, false,
240 MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
241 MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
242 { 0x00000000081c0000, 0x00000000081c0000, true,
243 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
244 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
245 { 0, 0, 0, 0, 0, 0 } };
247 struct mce_derror_table {
248 unsigned long dsisr_value;
249 bool dar_valid; /* dar is a valid indicator of faulting address */
250 unsigned int error_type;
251 unsigned int error_subtype;
252 unsigned int initiator;
253 unsigned int severity;
256 static const struct mce_derror_table mce_p7_derror_table[] = {
258 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
259 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
261 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
262 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
264 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
265 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
267 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
268 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
270 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
271 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
273 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
274 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
276 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
277 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
278 { 0, false, 0, 0, 0, 0 } };
280 static const struct mce_derror_table mce_p8_derror_table[] = {
282 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
283 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
285 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
286 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
288 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
289 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
291 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
292 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
294 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
295 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
297 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
298 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
300 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
301 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
303 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
304 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
306 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
307 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
308 { 0, false, 0, 0, 0, 0 } };
310 static const struct mce_derror_table mce_p9_derror_table[] = {
312 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
313 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
315 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
316 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
318 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
319 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
321 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
322 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
324 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
325 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
327 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
328 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
330 MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
331 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
333 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
334 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
336 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
337 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
339 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
340 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
342 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
343 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
345 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
346 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
348 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN,
349 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
350 { 0, false, 0, 0, 0, 0 } };
352 static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
356 * Carefully look at the NIP to determine
357 * the instruction to analyse. Reading the NIP
358 * in real-mode is tricky and can lead to recursive
362 unsigned long pfn, instr_addr;
363 struct instruction_op op;
364 struct pt_regs tmp = *regs;
366 pfn = addr_to_pfn(regs, regs->nip);
367 if (pfn != ULONG_MAX) {
368 instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
369 instr = *(unsigned int *)(instr_addr);
370 if (!analyse_instr(&op, &tmp, instr)) {
371 pfn = addr_to_pfn(regs, op.ea);
373 *phys_addr = (pfn << PAGE_SHIFT);
377 * analyse_instr() might fail if the instruction
378 * is not a load/store, although this is unexpected
379 * for load/store errors or if we got the NIP
387 static int mce_handle_ierror(struct pt_regs *regs,
388 const struct mce_ierror_table table[],
389 struct mce_error_info *mce_err, uint64_t *addr,
392 uint64_t srr1 = regs->msr;
398 for (i = 0; table[i].srr1_mask; i++) {
399 if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
402 /* attempt to correct the error */
403 switch (table[i].error_type) {
404 case MCE_ERROR_TYPE_SLB:
405 handled = mce_flush(MCE_FLUSH_SLB);
407 case MCE_ERROR_TYPE_ERAT:
408 handled = mce_flush(MCE_FLUSH_ERAT);
410 case MCE_ERROR_TYPE_TLB:
411 handled = mce_flush(MCE_FLUSH_TLB);
415 /* now fill in mce_error_info */
416 mce_err->error_type = table[i].error_type;
417 switch (table[i].error_type) {
418 case MCE_ERROR_TYPE_UE:
419 mce_err->u.ue_error_type = table[i].error_subtype;
421 case MCE_ERROR_TYPE_SLB:
422 mce_err->u.slb_error_type = table[i].error_subtype;
424 case MCE_ERROR_TYPE_ERAT:
425 mce_err->u.erat_error_type = table[i].error_subtype;
427 case MCE_ERROR_TYPE_TLB:
428 mce_err->u.tlb_error_type = table[i].error_subtype;
430 case MCE_ERROR_TYPE_USER:
431 mce_err->u.user_error_type = table[i].error_subtype;
433 case MCE_ERROR_TYPE_RA:
434 mce_err->u.ra_error_type = table[i].error_subtype;
436 case MCE_ERROR_TYPE_LINK:
437 mce_err->u.link_error_type = table[i].error_subtype;
440 mce_err->severity = table[i].severity;
441 mce_err->initiator = table[i].initiator;
442 if (table[i].nip_valid) {
444 if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
445 table[i].error_type == MCE_ERROR_TYPE_UE) {
448 if (get_paca()->in_mce < MAX_MCE_DEPTH) {
449 pfn = addr_to_pfn(regs, regs->nip);
450 if (pfn != ULONG_MAX) {
460 mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
461 mce_err->severity = MCE_SEV_ERROR_SYNC;
462 mce_err->initiator = MCE_INITIATOR_CPU;
467 static int mce_handle_derror(struct pt_regs *regs,
468 const struct mce_derror_table table[],
469 struct mce_error_info *mce_err, uint64_t *addr,
472 uint64_t dsisr = regs->dsisr;
479 for (i = 0; table[i].dsisr_value; i++) {
480 if (!(dsisr & table[i].dsisr_value))
483 /* attempt to correct the error */
484 switch (table[i].error_type) {
485 case MCE_ERROR_TYPE_SLB:
486 if (mce_flush(MCE_FLUSH_SLB))
489 case MCE_ERROR_TYPE_ERAT:
490 if (mce_flush(MCE_FLUSH_ERAT))
493 case MCE_ERROR_TYPE_TLB:
494 if (mce_flush(MCE_FLUSH_TLB))
500 * Attempt to handle multiple conditions, but only return
501 * one. Ensure uncorrectable errors are first in the table
507 /* now fill in mce_error_info */
508 mce_err->error_type = table[i].error_type;
509 switch (table[i].error_type) {
510 case MCE_ERROR_TYPE_UE:
511 mce_err->u.ue_error_type = table[i].error_subtype;
513 case MCE_ERROR_TYPE_SLB:
514 mce_err->u.slb_error_type = table[i].error_subtype;
516 case MCE_ERROR_TYPE_ERAT:
517 mce_err->u.erat_error_type = table[i].error_subtype;
519 case MCE_ERROR_TYPE_TLB:
520 mce_err->u.tlb_error_type = table[i].error_subtype;
522 case MCE_ERROR_TYPE_USER:
523 mce_err->u.user_error_type = table[i].error_subtype;
525 case MCE_ERROR_TYPE_RA:
526 mce_err->u.ra_error_type = table[i].error_subtype;
528 case MCE_ERROR_TYPE_LINK:
529 mce_err->u.link_error_type = table[i].error_subtype;
532 mce_err->severity = table[i].severity;
533 mce_err->initiator = table[i].initiator;
534 if (table[i].dar_valid)
536 else if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
537 table[i].error_type == MCE_ERROR_TYPE_UE) {
539 * We do a maximum of 4 nested MCE calls, see
540 * kernel/exception-64s.h
542 if (get_paca()->in_mce < MAX_MCE_DEPTH)
543 mce_find_instr_ea_and_phys(regs, addr,
552 mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
553 mce_err->severity = MCE_SEV_ERROR_SYNC;
554 mce_err->initiator = MCE_INITIATOR_CPU;
559 static long mce_handle_ue_error(struct pt_regs *regs)
564 * On specific SCOM read via MMIO we may get a machine check
565 * exception with SRR0 pointing inside opal. If that is the
566 * case OPAL may have recovery address to re-read SCOM data in
567 * different way and hence we can recover from this MC.
570 if (ppc_md.mce_check_early_recovery) {
571 if (ppc_md.mce_check_early_recovery(regs))
577 static long mce_handle_error(struct pt_regs *regs,
578 const struct mce_derror_table dtable[],
579 const struct mce_ierror_table itable[])
581 struct mce_error_info mce_err = { 0 };
582 uint64_t addr, phys_addr = ULONG_MAX;
583 uint64_t srr1 = regs->msr;
586 if (SRR1_MC_LOADSTORE(srr1))
587 handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
590 handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
593 if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
594 handled = mce_handle_ue_error(regs);
596 save_mce_event(regs, handled, &mce_err, regs->nip, addr, phys_addr);
601 long __machine_check_early_realmode_p7(struct pt_regs *regs)
603 /* P7 DD1 leaves top bits of DSISR undefined */
604 regs->dsisr &= 0x0000ffff;
606 return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
609 long __machine_check_early_realmode_p8(struct pt_regs *regs)
611 return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
614 long __machine_check_early_realmode_p9(struct pt_regs *regs)
617 * On POWER9 DD2.1 and below, it's possible to get a machine check
618 * caused by a paste instruction where only DSISR bit 25 is set. This
619 * will result in the MCE handler seeing an unknown event and the kernel
620 * crashing. An MCE that occurs like this is spurious, so we don't need
621 * to do anything in terms of servicing it. If there is something that
622 * needs to be serviced, the CPU will raise the MCE again with the
623 * correct DSISR so that it can be serviced properly. So detect this
624 * case and mark it as handled.
626 if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
629 return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);