#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the matching or
 * next higher value.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;	/* bit pattern for scrub rate */
	u32 bandwidth;	/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	if (dct || offset >= 0x100)
		return -EINVAL;

	/*
	 * Note: If ganging is enabled, barring the regs
	 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
	 * return 0. (cf. Section 2.8.1 F10h BKDG)
	 */
	if (dct_ganging_enabled(pvt))
		return 0;

	/*
	 * F15h: F2x1xx addresses do not map explicitly to DCT1.
	 * We should select which DCT we access using F1x10C[DctCfgSel].
	 */
	dct = (dct && pvt->model == 0x30) ? 3 : dct;
	f15h_select_dct(pvt, dct);

	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software
 * on other archs, we might not have access to the caches directly.
 */
static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
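/*
 * Worked example (illustrative, derived from the comment above): a requested
 * scrubval of 0x9 lies in the valid 0x5-0x14 window, so 0x9 - 0x5 = 0x4 is
 * what lands in the low nibble of F17H_SCR_LIMIT_ADDR, and the enable bit in
 * F17H_SCR_BASE_ADDR is set; any scrubval outside the window simply clears
 * the enable bit, turning scrubbing off.
 */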
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x17) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	return scrubrates[i].bandwidth;
}
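/*
 * Worked example (illustrative): scrubrates[] is ordered from the highest
 * bandwidth (scrubval 0x01, 1600000000 bytes/sec) down to the "scrubbing off"
 * terminator. A request of new_bw = 1600000000 matches the first entry
 * immediately; a very small request walks down until the first entry whose
 * bandwidth is <= new_bw; if none qualifies, the loop ends on the final
 * "off" element and scrubbing is disabled.
 */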
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	switch (pvt->fam) {
	case 0x15:
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
		break;

	case 0x17:
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		}
		break;

	default:
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
		break;
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * Returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
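/*
 * Worked example (illustrative): sys_addr 0xffffff8012345678 carries all-ones
 * fill in bits 63-40; masking with 0x000000ffffffffff leaves 0x8012345678,
 * which is then compared against the node's DRAM base/limit pair.
 */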
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift = 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
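/*
 * Worked example (illustrative): with a hole base x = 0xc0000000, the hole
 * spans [0xc0000000, 0xffffffff], so *hole_size = 0x100000000 - 0xc0000000 =
 * 0x40000000 (1GB), and the hoisted copy of that range appears at
 * [0x100000000, 0x13fffffff].
 */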
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
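/*
 * Worked example (illustrative): IntlvEn = 0x3 selects two SysAddr bits for
 * node interleaving (intlv_shift_table[3] == 2), i.e. a 4-node interleave;
 * IntlvEn = 0x7 selects three bits (8 nodes), and IntlvEn = 0 means no node
 * interleaving at all.
 */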
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
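/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): error_address
 * 0x12345678 splits into page 0x12345 and offset 0x678 - the low PAGE_SHIFT
 * bits form the offset and the remaining bits select the page frame.
 */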
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}

		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 4; dimm++) {
		size0 = 0;
		cs0 = dimm * 2;

		if (csrow_enabled(cs0, ctrl, pvt))
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);

		size1 = 0;
		cs1 = dimm * 2 + 1;

		if (csrow_enabled(cs1, ctrl, pvt))
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   cs0, size0, cs1, size1);
	}
}

static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for (i = 0; i < NUM_UMCS; i++) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
			 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
			 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
			 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
				 i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using %s syndromes.\n",
		   ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;

	prep_chip_selects(pvt);

	if (pvt->umc) {
		base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
		base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
		mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
		mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
	} else {
		base_reg0 = DCSB0;
		base_reg1 = DCSB1;
		mask_reg0 = DCSM0;
		mask_reg1 = DCSM1;
	}

	for_each_chip_select(cs, 0, pvt) {
		int reg0 = base_reg0 + (cs * 4);
		int reg1 = base_reg1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (pvt->umc) {
			if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: 0x%x\n",
					 cs, *base0, reg0);

			if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: 0x%x\n",
					 cs, *base1, reg1);
		} else {
			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
				edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
					 cs, *base0, reg0);

			if (pvt->fam == 0xf)
				continue;

			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
				edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
					 cs, *base1, (pvt->fam == 0x10) ? reg1
									: reg0);
		}
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0 = mask_reg0 + (cs * 4);
		int reg1 = mask_reg1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (pvt->umc) {
			if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: 0x%x\n",
					 cs, *mask0, reg0);

			if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: 0x%x\n",
					 cs, *mask1, reg1);
		} else {
			if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
				edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
					 cs, *mask0, reg0);

			if (pvt->fam == 0xf)
				continue;

			if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
				edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
					 cs, *mask1, (pvt->fam == 0x10) ? reg1
									: reg0);
		}
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	case 0x17:
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
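/*
 * Worked example (illustrative): i = 5 is odd and > 2, so shift = (5 + 1) >> 1
 * = 3; a 64-bit DCT (dct_width == false) then yields 128 << 3 = 1024MB, and a
 * 128-bit DCT doubles that to 2048MB.
 */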
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
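/*
 * Worked example (illustrative) of the obfuscated mapping above: for
 * cs_mode = 6, diff = 6/3 + (6 > 5) = 3, so the size is 32 << (6 - 3) = 256MB;
 * for cs_mode = 8, diff = 8/3 + 1 = 3 and 32 << (8 - 3) = 1024MB - both agree
 * with the table.
 */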
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for (i = 0; i < NUM_UMCS; i++)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs_size is 512MB for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 base_addr = pvt->csels[umc].csbases[csrow_nr];

	/*  Each mask is used for every two base addresses. */
	u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];

	/*  Register [31:1] = Address [39:9]. Size is in kBs here. */
	u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;

	edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);

	/* Return size in MBs. */
	return size >> 10;
}
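/*
 * Worked example (illustrative): with a base_addr register of 0x0 and an
 * addr_mask register of 0x1ffff, size = ((0x1ffff >> 1) - 0 + 1) >> 1 =
 * 0x8000 kB (32768kB), which the final >> 10 reports as 32MB.
 */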
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	      (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en	= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel		= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For F15m60h, we need multiplier for LRDIMM cs_size
			 * calculation. We pass dimm value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				dimm * 2,     size0,
				dimm * 2 + 1, size1);
	}
}
static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_base_addr_to_cs_size,
		}
	},
	[F17_M10H_CPUS] = {
		.ctl_name = "F17h_M10h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_base_addr_to_cs_size,
		}
	},
};
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
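/*
 * Worked example (illustrative): for the x4 table, err_sym 2 covers
 * vectors[8..11] = { 0x0001, 0x0002, 0x0004, 0x0008 }. A syndrome of 0x0001
 * survives the first two symbol groups but is cancelled by the first
 * eigenvector of group 2 (0x0001 ^ 0x0001 == 0), so decode_syndrome() returns
 * err_sym 2, which map_err_sym_to_channel() below resolves to channel 0 for
 * 4-bit symbols (2 >> 4 == 0).
 */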
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 */
static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
{
	u32 umc_instance_id[] = {0x50f00, 0x150f00};
	u32 instance_id = m->ipid & GENMASK(31, 0);
	int i, channel = -1;

	for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
		if (umc_instance_id[i] == instance_id)
			channel = i;

	return channel;
}
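
/*
 * Example (illustrative only): per the table above, a UMC0 bank carries
 * instance_id 0x50f00 and a UMC1 bank 0x150f00 in IPID[31:0]. An MCE whose
 * IPID low word reads 0x150f00 is thus attributed to channel 1; any other
 * value yields -1, which the caller flags as ERR_CHANNEL.
 */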
static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(pvt, m);
	if (err.channel < 0) {
		err.err_code = ERR_CHANNEL;
		goto log_error;
	}

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}
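
/*
 * Illustrative sketch of the MCA_SYND decoding above, run on a made-up
 * value (not a captured error record). The helper name is hypothetical and
 * the block is not compiled into the driver:
 */
#if 0
static void example_umc_syndrome(void)
{
	u64 synd = (0xabcdULL << 32) | (0x10 << 18);	/* made-up MCA_SYND */
	u8 length = (synd >> 18) & 0x3f;		/* 0x10 == 16 bits */
	u16 syndrome = (synd >> 32) & GENMASK(length - 1, 0);

	/* prints syndrome=0xabcd csrow=0 */
	pr_info("syndrome=%#x csrow=%llu\n", syndrome, synd & 0x7);
}
#endif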
/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
			return -ENODEV;
		}

		if (!pci_ctl_dev)
			pci_ctl_dev = &pvt->F0->dev;

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
		return -ENODEV;
	}

	if (!pci_ctl_dev)
		pci_ctl_dev = &pvt->F2->dev;

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for (i = 0; i < NUM_UMCS; i++) {
			/* Check enabled channels only: */
			if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
			    (pvt->umc[i].ecc_ctrl & BIT(7))) {
				pvt->ecc_sym_sz = 8;
				break;
			}
		}

		return;
	}

	if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}
/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for (i = 0; i < NUM_UMCS; i++) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, "  TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);

	dump_misc_regs(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc)
		csrow_nr >>= 1;

	cs_mode = DBAM_DIMM(csrow_nr, dbam);

	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
	nr_pages <<= 20 - PAGE_SHIFT;

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
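
/*
 * Example of the unit conversion above: dbam_to_cs() returns the chip-select
 * size in MiB (implied by the << (20 - PAGE_SHIFT) shift), so with 4K pages
 * (PAGE_SHIFT == 12) a 2048 MiB chip select yields
 * nr_pages = 2048 << 8 = 524288 pages per channel.
 */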
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (!pvt->umc) {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

		pvt->nbcfg = val;

		edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
			 pvt->mc_node_id, val,
			 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
	}

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->umc) {
			if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
				edac_mode = EDAC_S4ECD4ED;
			else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
				edac_mode = EDAC_SECDED;

		} else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}
2885 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2889 for_each_online_cpu(cpu)
2890 if (amd_get_nb_id(cpu) == nid)
2891 cpumask_set_cpu(cpu, mask);
2894 /* check MCG_CTL on all the cpus on this node */
2895 static bool nb_mce_bank_enabled_on_node(u16 nid)
2901 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2902 amd64_warn("%s: Error allocating mask\n", __func__);
2906 get_cpus_on_this_dct_cpumask(mask, nid);
2908 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2910 for_each_cpu(cpu, mask) {
2911 struct msr *reg = per_cpu_ptr(msrs, cpu);
2912 nbe = reg->l & MSR_MCGCTL_NBE;
2914 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2916 (nbe ? "enabled" : "disabled"));
2924 free_cpumask_var(mask);
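
/*
 * For reference: MSR_MCGCTL_NBE is bit 4 of MSR_IA32_MCG_CTL (cf. the
 * "set MSR 0x%08x[4]" hint in ecc_enabled() below). On a single core the
 * per-CPU check above reduces to something like the following sketch; the
 * real code uses rdmsr_on_cpus() to read every core of the node at once:
 */
#if 0
	u64 val;

	rdmsrl(MSR_IA32_MCG_CTL, val);
	if (!(val & MSR_MCGCTL_NBE))
		pr_info("NB MCE bank disabled on this core\n");
#endif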
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option makes it possible to force-enable hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			u32 base = get_umc_base(i);

			/* Only check enabled UMCs. */
			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
				continue;

			if (!(value & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
				continue;

			if (value & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				 MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("Node %d: DRAM ECC %s.\n",
		   nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en) {
		amd64_info("%s", ecc_msg);
		return false;
	}

	return true;
}
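
/*
 * Example of the mask comparison above (illustrative): with UMC0 and UMC1
 * both initialized, umc_en_mask = BIT(0) | BIT(1) = 0b11. ECC is reported
 * enabled only if both UMCs also set UMC_ECC_ENABLED (ecc_en_mask = 0b11);
 * a single UMC with ECC off makes the masks differ and ecc_en stays false.
 */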
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;

	for (i = 0; i < NUM_UMCS; i++) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);

			dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}
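
/*
 * Example (illustrative): if every initialized UMC reports ECC enabled,
 * chipkill capability and x4 DRAM devices (dimm_cfg BIT(6)), the loop above
 * leaves ecc_en, cpk_en and dev_x4 at 1, so the controller ends up
 * advertising EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED.
 */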
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model	= boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};
static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	u16 pci_id1, pci_id2;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc) {
			ret = -ENOMEM;
			goto err_free;
		}

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (err)
		goto err_post_init;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_post_init:
	if (pvt->fam >= 0x17)
		kfree(pvt->umc);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	if (pci_ctl)
		return;

	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;
	int i;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);

	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
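
/*
 * Typical usage (illustrative): the module parameters above can be passed
 * at load time, e.g.:
 *
 *	modprobe amd64_edac edac_op_state=0 ecc_enable_override=1
 *
 * Note that forcing ECC on via ecc_enable_override is refused on family
 * 0x17 and later systems; see probe_one_instance() above.
 */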