// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

static struct amd64_family_type *fam_type;

static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	/* ... */
	{ 0x00, 0UL},		/* scrubbing off */
};
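/*
 * Illustrative sketch (not part of the driver): how the 'set' operation walks
 * this table. Entries are sorted from highest to lowest bandwidth, so the
 * first entry whose bandwidth is <= the requested value is the "matching-
 * or higher value" mentioned above. pick_scrubval() is a hypothetical helper,
 * shown here against a table reduced to the two entries above:
 *
 *	static u32 pick_scrubval(u32 new_bw)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
 *			if (scrubrates[i].bandwidth <= new_bw)
 *				break;
 *
 *		return scrubrates[i].scrubval;
 *	}
 *
 * pick_scrubval(2000000000) returns 0x01 (the 1.6 GB/s entry), while
 * pick_scrubval(1000) falls through to the terminating entry and returns
 * 0x00, i.e. scrubbing off.
 */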
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}

	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
	/*
	 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
	 * are shifted down by 0x5, so scrubval 0x5 is written to the register
	 * as 0x0, scrubval 0x6 as 0x1, etc.
	 */
	if (scrubval >= 0x5 && scrubval <= 0x14) {
		scrubval -= 0x5;
		pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
	} else {
		pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
	}
}
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->umc) {
		__f17h_set_scrubval(pvt, scrubval);
	} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
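/*
 * Usage sketch (userspace, not part of the driver): the EDAC core exposes the
 * scrub rate through the memory controller's sdram_scrub_rate sysfs attribute,
 * which takes the requested bandwidth in bytes/sec. A minimal example,
 * assuming mc0 is driven by this module:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/devices/system/edac/mc/mc0/sdram_scrub_rate", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "%u\n", 400000000u);	// request ~400 MB/s
 *		return fclose(f) ? 1 : 0;
 *	}
 *
 * Reading the same attribute back reports the rate actually programmed, which
 * may be the closest table entry rather than the exact request.
 */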
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->umc) {
		amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
		if (scrubval & BIT(0)) {
			amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
			scrubval &= 0xF;
			scrubval += 0x5;
		} else {
			scrubval = 0;
		}
	} else if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers.  Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15,  5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5)  << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15,  5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < fam_type->max_mcs; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size.  Function returns 0 if info is valid or 1 if
 * info is invalid.  Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater.  In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
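/*
 * Worked example (illustrative, with made-up register values): suppose
 * dhar_base() yields 0xc0000000. Then:
 *
 *	hole_base = 0xc0000000
 *	hole_size = (1ULL << 32) - 0xc0000000 = 0x40000000	(1 GB hole)
 *
 * and, per the diagram above, the 1 GB of DRAM that would have sat in
 * [0xc0000000, 0xffffffff] is instead accessed at [0x100000000, 0x13fffffff].
 * A SysAddr in that relocated window maps back to a DramAddr by subtracting
 * *hole_offset:
 *
 *	u64 sys_addr = 0x100000000ULL;
 *
 *	if (sys_addr >= (1ULL << 32) && sys_addr < (1ULL << 32) + hole_size)
 *		dram_addr = sys_addr - hole_offset;
 *
 * which is exactly what sys_addr_to_dram_addr() below does.
 */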
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to.  It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1).  Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
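/*
 * Worked example (illustrative values): with two-node interleaving
 * (intlv_en = 0x01, so intlv_shift = 1), DramAddr bit 12 selects the node and
 * must be squeezed out to form the InputAddr:
 *
 *	u64 dram_addr   = 0x5000;	// bit 12 set: the interleave bit
 *	int intlv_shift = 1;
 *	u64 input_addr  = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
 *			   (dram_addr & 0xfff);		// == 0x2000
 *
 * Bits [11:0] pass through untouched; bits [35:12] shift down by one so the
 * node-select bit disappears from the node-local address.
 */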
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr.  It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error.  mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error).  Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the
 * DIMMs are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	if (pvt->umc) {
		u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;

		for_each_umc(i) {
			if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			/* UMC Configuration bit 12 (DimmEccEn) */
			if (pvt->umc[i].umc_cfg & BIT(12))
				dimm_ecc_en_mask |= BIT(i);
		}

		if (umc_en_mask == dimm_ecc_en_mask)
			edac_cap = EDAC_FLAG_SECDED;
	} else {
		bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
			? 19
			: 17;

		if (pvt->dclr0 & BIT(bit))
			edac_cap = EDAC_FLAG_SECDED;
	}

	return edac_cap;
}
static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC:%s\n",
		    (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}
#define CS_EVEN_PRIMARY		BIT(0)
#define CS_ODD_PRIMARY		BIT(1)
#define CS_EVEN_SECONDARY	BIT(2)
#define CS_ODD_SECONDARY	BIT(3)
#define CS_3R_INTERLEAVE	BIT(4)

#define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)

static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
	u8 base, count = 0;
	int cs_mode = 0;

	if (csrow_enabled(2 * dimm, ctrl, pvt))
		cs_mode |= CS_EVEN_PRIMARY;

	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_PRIMARY;

	/* Asymmetric dual-rank DIMM support. */
	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_SECONDARY;

	/*
	 * 3 Rank interleaving support.
	 * There should be only three bases enabled and their two masks should
	 * be equal.
	 */
	for_each_chip_select(base, ctrl, pvt)
		count += csrow_enabled(base, ctrl, pvt);

	if (count == 3 &&
	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
		edac_dbg(1, "3R interleaving in use.\n");
		cs_mode |= CS_3R_INTERLEAVE;
	}

	return cs_mode;
}
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1, cs_mode;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 2; dimm++) {
		cs0 = dimm * 2;
		cs1 = dimm * 2 + 1;

		cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);

		size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
		size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				cs0,	size0,
				cs1,	size1);
	}
}
static void __dump_misc_regs_df(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
				i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
				    (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
				i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
				i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (pvt->dram_type == MEM_LRDDR4) {
			amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
					i, 1 << ((tmp >> 4) & 0x3));
		}

		debug_display_dimm_sizes_df(pvt, i);
	}

	edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
static void __dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	if (pvt->umc)
		__dump_misc_regs_df(pvt);
	else
		__dump_misc_regs(pvt);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else if (pvt->fam >= 0x17) {
		int umc;

		for_each_umc(umc) {
			pvt->csels[umc].b_cnt = 4;
			pvt->csels[umc].m_cnt = 2;
		}
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}
static void read_umc_base_mask(struct amd64_pvt *pvt)
{
	u32 umc_base_reg, umc_base_reg_sec;
	u32 umc_mask_reg, umc_mask_reg_sec;
	u32 base_reg, base_reg_sec;
	u32 mask_reg, mask_reg_sec;
	u32 *base, *base_sec;
	u32 *mask, *mask_sec;
	int cs, umc;

	for_each_umc(umc) {
		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;

		for_each_chip_select(cs, umc, pvt) {
			base = &pvt->csels[umc].csbases[cs];
			base_sec = &pvt->csels[umc].csbases_sec[cs];

			base_reg = umc_base_reg + (cs * 4);
			base_reg_sec = umc_base_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base, base_reg);

			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base_sec, base_reg_sec);
		}

		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
		umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;

		for_each_chip_select_mask(cs, umc, pvt) {
			mask = &pvt->csels[umc].csmasks[cs];
			mask_sec = &pvt->csels[umc].csmasks_sec[cs];

			mask_reg = umc_mask_reg + (cs * 4);
			mask_reg_sec = umc_mask_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask_sec, mask_reg_sec);
		}
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	if (pvt->umc)
		return read_umc_base_mask(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	if (pvt->umc) {
		if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
			pvt->dram_type = MEM_LRDDR4;
		else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
			pvt->dram_type = MEM_RDDR4;
		else
			pvt->dram_type = MEM_DDR4;
		return;
	}

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;
		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = amd_get_nb_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

				    /* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

				    /* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory.  This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
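/*
 * Illustrative sketch (not part of the driver): a standalone self-check of
 * the "obfuscated" formula above. Built with a hosted libc, it prints the
 * same cs_mode -> size mapping as the table in the comment, e.g. for
 * cs_mode 6: diff = 6/3 + 1 = 3, so 32 << (6 - 3) = 256 MB.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		for (unsigned cs_mode = 0; cs_mode <= 10; cs_mode++) {
 *			unsigned diff = cs_mode / 3 + (unsigned)(cs_mode > 5);
 *
 *			printf("%u\t%u\n", cs_mode, 32u << (cs_mode - diff));
 *		}
 *		return 0;
 *	}
 */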
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int f17_early_channel_count(struct amd64_pvt *pvt)
{
	int i, channels = 0;

	/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
	for_each_umc(i)
		channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well.. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512MB for F15h M60h */
		if (cs_mode == 0x1)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	u32 addr_mask_orig, addr_mask_deinterleaved;
	u32 msb, weight, num_zero_bits;
	int dimm, size = 0;

	/* No Chip Selects are enabled. */
	if (!cs_mode)
		return size;

	/* Requested size of an even CS but none are enabled. */
	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
		return size;

	/* Requested size of an odd CS but none are enabled. */
	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
		return size;

	/*
	 * There is one mask per DIMM, and two Chip Selects per DIMM.
	 *	CS0 and CS1 -> DIMM0
	 *	CS2 and CS3 -> DIMM1
	 */
	dimm = csrow_nr >> 1;

	/* Asymmetric dual-rank DIMM support. */
	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
		addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
	else
		addr_mask_orig = pvt->csels[umc].csmasks[dimm];

	/*
	 * The number of zero bits in the mask is equal to the number of bits
	 * in a full mask minus the number of bits in the current mask.
	 *
	 * The MSB is the number of bits in the full mask because BIT[0] is
	 * always 0.
	 *
	 * In the special 3 Rank interleaving case, a single bit is flipped
	 * without swapping with the most significant bit. This can be handled
	 * by keeping the MSB where it is and ignoring the single zero bit.
	 */
	msb = fls(addr_mask_orig) - 1;
	weight = hweight_long(addr_mask_orig);
	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);

	/* Take the number of zero bits off from the top of the mask. */
	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);

	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	size = (addr_mask_deinterleaved >> 2) + 1;

	/* Return size in MBs. */
	return size >> 10;
}
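/*
 * Worked example (illustrative mask values): a 4 GB chip select interleaved
 * across two UMCs might report addr_mask_orig = 0x01fffffa: bit 2 was zeroed
 * for interleaving and one extra high bit participates instead.
 *
 *	msb           = fls(0x01fffffa) - 1   = 24
 *	weight        = hweight(0x01fffffa)   = 23
 *	num_zero_bits = 24 - 23               = 1	(no 3R interleave)
 *	deinterleaved = GENMASK(24 - 1, 1)    = 0x00fffffe
 *	size          = (0x00fffffe >> 2) + 1 = 0x400000 kB
 *	size >> 10                            = 4096 MB
 *
 * i.e. the interleave bit is squeezed out of the mask and the size comes out
 * as the true per-chip-select 4 GB.
 */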
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
}
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
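/*
 * Worked example (illustrative register values): with swap_base = 0x40 and
 * rgn_size = 0x4 (both in 128 MB units), the 512 MB at [0, 0x20000000) and
 * the 512 MB at [0x200000000, 0x220000000) trade places. Because swap_base
 * has no bits set below the region size, the XOR acts as a swap in both
 * directions:
 *
 *	0x200000000 ^ ((u64)0x40 << 27) == 0x000000000
 *	0x008000000 ^ ((u64)0x40 << 27) == 0x208000000
 *
 * Any address outside both windows is returned unchanged.
 */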
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	if (pvt->model >= 0x60)
		channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
						     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dbam = pvt->dbam0;
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			/*
			 * For F15m60h, we need multiplier for LRDIMM cs_size
			 * calculation. We pass dimm value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
				dimm * 2,     size0,
				dimm * 2 + 1, size1);
	}
}
static struct amd64_family_type family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F15_M60H_CPUS] = {
		.ctl_name = "F15h_M60h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_m60h_dbam_to_chip_select,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F16_M30H_CPUS] = {
		.ctl_name = "F16h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
		.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
		}
	},
	[F17_CPUS] = {
		.ctl_name = "F17h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M10H_CPUS] = {
		.ctl_name = "F17h_M10h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M30H_CPUS] = {
		.ctl_name = "F17h_M30h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M60H_CPUS] = {
		.ctl_name = "F17h_M60h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F17_M70H_CPUS] = {
		.ctl_name = "F17h_M70h",
		.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
		.max_mcs = 2,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
	[F19_CPUS] = {
		.ctl_name = "F19h",
		.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
		.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
		.max_mcs = 8,
		.ops = {
			.early_channel_count	= f17_early_channel_count,
			.dbam_to_cs		= f17_addr_mask_to_cs_size,
		}
	},
};
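/*
 * Note: .max_mcs is the number of controllers/channels a family can expose
 * (e.g. 8 UMCs on F17h M30h and F19h). It sizes the UMC array allocated in
 * hw_info_get() and the EDAC channel layer set up in init_one_instance().
 */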
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
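/*
 * Rough sketch of the decode below: the vectors are grouped per error symbol
 * (v_dim entries per group). For each group, decode_syndrome() tries to XOR
 * the syndrome down to zero using that group's eigenvector components. As an
 * illustrative example, a raw syndrome of 0x0001 is not cancelled by the
 * first two x4 groups but is by the third one (0x0001, 0x0002, ...), so
 * err_sym 2 is returned and later mapped to channel 0.
 */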
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
					  err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble in
 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
 * number.
 */
static int find_umc_channel(struct mce *m)
{
	return (m->ipid & GENMASK(31, 0)) >> 20;
}
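/*
 * Illustrative example: an IPID whose low 32 bits are 0x150000 yields
 * channel 1, since 0x150000 >> 20 == 1.
 */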
static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(m);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
			return -ENODEV;
		}

		if (!pci_ctl_dev)
			pci_ctl_dev = &pvt->F0->dev;

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
		return -ENODEV;
	}

	if (!pci_ctl_dev)
		pci_ctl_dev = &pvt->F2->dev;

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for_each_umc(i) {
			/* Check enabled channels only: */
			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
					pvt->ecc_sym_sz = 16;
					return;
				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
					pvt->ecc_sym_sz = 8;
					return;
				}
			}
		}
	} else if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}
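/*
 * Summary: the symbol size defaults to x4. On UMC systems it is taken from
 * UMCCH_ECC_CTRL (BIT(9) => x16, BIT(7) => x8); on DCT systems, F10h revD
 * and later may report x8 via EXT_NB_MCA_CFG BIT(25).
 */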
/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}
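/*
 * Note: unlike the DCT registers read below via PCI config space, the UMC
 * registers live behind SMN, hence the amd_smn_read() accesses relative to
 * the per-channel base returned by get_umc_base().
 */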
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, "  TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of four 4-bit fields:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses.
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc) {
		csrow_nr >>= 1;
		cs_mode = DBAM_DIMM(csrow_nr, dbam);
	} else {
		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
	}

	nr_pages   = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
	nr_pages <<= 20 - PAGE_SHIFT;

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		    csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
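/*
 * Worked example for the shift above: dbam_to_cs() reports the chip select
 * size in MB, so with 4K pages a 2048 MB chip select becomes
 * 2048 << (20 - 12) == 524288 pages. (Numbers are illustrative.)
 */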
static int init_csrows_df(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	enum dev_type dev_type = DEV_UNKNOWN;
	struct dimm_info *dimm;
	int empty = 1;
	u8 umc, cs;

	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
		edac_mode = EDAC_S16ECD16ED;
		dev_type = DEV_X16;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
		edac_mode = EDAC_S8ECD8ED;
		dev_type = DEV_X8;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
		edac_mode = EDAC_S4ECD4ED;
		dev_type = DEV_X4;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
		edac_mode = EDAC_SECDED;
	}

	for_each_umc(umc) {
		for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;

			empty = 0;
			dimm = mci->csrows[cs]->channels[umc]->dimm;

			edac_dbg(1, "MC node: %d, csrow: %d\n",
					pvt->mc_node_id, cs);

			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->dtype = dev_type;
			dimm->grain = 64;
		}
	}

	return empty;
}
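/*
 * Layout note: on UMC systems the EDAC "csrow" layer indexes the chip
 * select and the "channel" layer indexes the UMC, which is why the code
 * above walks mci->csrows[cs]->channels[umc].
 */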
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (pvt->umc)
		return init_csrows_df(mci);

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			    pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->grain = 64;
		}
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
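/*
 * MSR_MCGCTL_NBE is bit 4 of MSR_IA32_MCG_CTL and gates NB MCE reporting
 * per core. The s->flags.nb_mce_enable snapshot taken on the way up lets
 * the OFF path restore the firmware's original setting instead of blindly
 * clearing the bit.
 */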
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
static bool ecc_enabled(struct amd64_pvt *pvt)
{
	u16 nid = pvt->mc_node_id;
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;
		struct amd64_umc *umc;

		for_each_umc(i) {
			umc = &pvt->umc[i];

			/* Only check enabled UMCs. */
			if (!(umc->sdp_ctrl & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (umc->umc_cap_hi & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				 MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("Node %d: DRAM ECC %s.\n",
		   nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en)
		return false;
	else
		return true;
}
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;

	for_each_umc(i) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);

			dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}
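/*
 * The dimm_cfg tests above assume the UMC DimmCfg layout used here:
 * BIT(6) set on every enabled channel means x4 devices, BIT(7) means x16,
 * and x8 is assumed otherwise.
 */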
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam_type->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	pvt->ext_model  = boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		/* Richland is only client */
		} else if (pvt->model == 0x13) {
			return NULL;
		} else {
			fam_type	= &family_types[F15_CPUS];
			pvt->ops	= &family_types[F15_CPUS].ops;
		}
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
			fam_type = &family_types[F17_M30H_CPUS];
			pvt->ops = &family_types[F17_M30H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
			fam_type = &family_types[F17_M60H_CPUS];
			pvt->ops = &family_types[F17_M60H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			break;
		}
		fallthrough;
	case 0x18:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;

		if (pvt->fam == 0x18)
			family_types[F17_CPUS].ctl_name = "F18h";
		break;

	case 0x19:
		if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			fam_type->ctl_name = "F19h_M20h";
			break;
		}

		fam_type	= &family_types[F19_CPUS];
		pvt->ops	= &family_types[F19_CPUS].ops;
		family_types[F19_CPUS].ctl_name = "F19h";
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
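/*
 * For instance, a family 0x19, model 0x21 part lands in the F17_M70H_CPUS
 * descriptor but is reported as "F19h_M20h" per the 0x20..0x2f range check
 * above. (Example model number chosen from within that range.)
 */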
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};
static int hw_info_get(struct amd64_pvt *pvt)
{
	u16 pci_id1, pci_id2;
	int ret;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc)
			return -ENOMEM;

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (ret)
		return ret;

	read_mc_regs(pvt);

	return 0;
}

static void hw_info_put(struct amd64_pvt *pvt)
{
	if (pvt->F0 || pvt->F1)
		free_mc_sibling_devs(pvt);

	kfree(pvt->umc);
}
static int init_one_instance(struct amd64_pvt *pvt)
{
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int ret = -EINVAL;

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		return ret;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = fam_type->max_mcs;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return ret;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return ret;
	}

	return 0;
}
static bool instance_has_memory(struct amd64_pvt *pvt)
{
	bool cs_enabled = false;
	int cs = 0, dct = 0;

	for (dct = 0; dct < fam_type->max_mcs; dct++) {
		for_each_chip_select(cs, dct, pvt)
			cs_enabled |= csrow_enabled(cs, dct, pvt);
	}

	return cs_enabled;
}
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_pvt *pvt = NULL;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_settings;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -ENODEV;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_enable;

	ret = hw_info_get(pvt);
	if (ret < 0)
		goto err_enable;

	ret = 0;
	if (!instance_has_memory(pvt)) {
		amd64_info("Node %d: No DIMMs detected.\n", nid);
		goto err_enable;
	}

	if (!ecc_enabled(pvt)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(pvt);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	dump_misc_regs(pvt);

	return ret;

err_enable:
	hw_info_put(pvt);
	kfree(pvt);

err_settings:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	hw_info_put(pvt);
	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	if (pci_ctl)
		return;

	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static const struct x86_cpu_id amd64_cpuids[] = {
	X86_MATCH_VENDOR_FAM(AMD,	0x0F, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x10, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x15, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x16, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x17, NULL),
	X86_MATCH_VENDOR_FAM(HYGON,	0x18, NULL),
	X86_MATCH_VENDOR_FAM(AMD,	0x19, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");