// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>

#include "edac_module.h"
#include "skx_common.h"

#define I10NM_REVISION	"v0.0.5"
#define EDAC_MOD_STR	"i10nm_edac"
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)
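/*
 * Helpers to read the memory-controller discovery registers from PCI
 * config space: the socket and per-MC MMIO BARs from the "uracu"
 * device, the SAD rules from the "sad_all" device and the capability
 * register from the "pcu_cr3" device.
 */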
#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)	\
	pci_read_config_dword((d)->sad_all, (offset) + (i) * 8, &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3, 0x90, &(reg))
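/*
 * Helpers to read/write memory-controller MMIO registers.  A register
 * address is the MC MMIO base (mbase) plus a per-channel stride
 * (chan_mmio_sz) plus the register offset; HBM MCs use a different set
 * of fixed offsets than DDR MCs.
 */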
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
	(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i)		\
	readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset)	\
	readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset)	\
	readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v)	\
	writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					  GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg)	\
	((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)

#define I10NM_HBM_IMC_MMIO_SIZE		0x9000
#define I10NM_IS_HBM_PRESENT(reg)	GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg)		GET_BITFIELD(reg, 29, 29)

#define I10NM_MAX_SAD			16
#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

#define RETRY_RD_ERR_LOG_UC		BIT(1)
#define RETRY_RD_ERR_LOG_NOOVER		BIT(14)
#define RETRY_RD_ERR_LOG_EN		BIT(15)
#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))
static struct list_head *i10nm_edac_list;

static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;
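/*
 * Per-channel register offsets of the retry_rd_err_log registers for
 * patrol-scrub and demand-read errors on ICX and SPR (with separate
 * tables for the two SPR HBM pseudo channels).  Entry 0 also holds the
 * enable/mode/valid bits manipulated below; all entries are dumped into
 * the decoded error message.
 */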
static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
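/*
 * Enable or disable retry_rd_err_log for one channel.  When enabling,
 * the current (BIOS default) control bits are saved in the channel's
 * state so they can be restored later; the EN bit is then set and the
 * overwrite/UC mode bits adjusted.  When disabling, the saved
 * configuration is written back.
 */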
static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
				       u32 *offsets_scrub, u32 *offsets_demand,
				       u32 *offsets_demand2)
{
	u32 s, d, d2;

	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
	if (offsets_demand2)
		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);

	if (enable) {
		/* Save default configurations */
		imc->chan[chan].retry_rd_err_log_s = s;
		imc->chan[chan].retry_rd_err_log_d = d;
		if (offsets_demand2)
			imc->chan[chan].retry_rd_err_log_d2 = d2;

		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		s |= RETRY_RD_ERR_LOG_EN;
		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		d |= RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			d2 &= ~RETRY_RD_ERR_LOG_UC;
			d2 |= RETRY_RD_ERR_LOG_NOOVER;
			d2 |= RETRY_RD_ERR_LOG_EN;
		}
	} else {
		/* Restore default configurations */
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
			s |= RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
			s |= RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
			s &= ~RETRY_RD_ERR_LOG_EN;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
			d |= RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
			d |= RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
			d &= ~RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
				d2 |= RETRY_RD_ERR_LOG_UC;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
				d2 &= ~RETRY_RD_ERR_LOG_EN;
		}
	}

	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
	if (offsets_demand2)
		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}
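/*
 * Apply the per-channel enable/disable above to every channel of every
 * memory controller on every socket, using the HBM offset tables (both
 * pseudo channels) for HBM MCs and the DDR tables otherwise.
 */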
static void enable_retry_rd_err_log(bool enable)
{
	struct skx_imc *imc;
	struct skx_dev *d;
	int i, j;

	list_for_each_entry(d, i10nm_edac_list, list)
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			imc = &d->imc[i];
			if (!imc->mbase)
				continue;

			for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
				if (imc->hbm_mc) {
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub_hbm0,
								  res_cfg->offsets_demand_hbm0,
								  NULL);
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub_hbm1,
								  res_cfg->offsets_demand_hbm1,
								  NULL);
				} else {
					__enable_retry_rd_err_log(imc, j, enable,
								  res_cfg->offsets_scrub,
								  res_cfg->offsets_demand,
								  res_cfg->offsets_demand2);
				}
			}
		}
}
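/*
 * Append the raw retry_rd_err_log registers and the corrected-error
 * counters (correrrcnt) of the failing channel to the decoded error
 * message.  In "linux" mode (retry_rd_err_log=2), the valid/UC/overflow
 * bits are cleared afterwards so the next error can be logged.
 */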
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
				  int len, bool scrub_err)
	struct skx_imc *imc = &res->dev->imc[res->imc];
	u32 log0, log1, log2, log3, log4;
	u32 corr0, corr1, corr2, corr3;
	u32 lxg0, lxg1, lxg3, lxg4;

			offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
					      res_cfg->offsets_demand_hbm1;
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
					      res_cfg->offsets_demand_hbm0;
		offsets = res_cfg->offsets_scrub;
			offsets = res_cfg->offsets_demand;
			xffsets = res_cfg->offsets_demand2;

	log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
	log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
	log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
	log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
	log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);

		lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
		lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
		lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
		lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
		lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);

	if (res_cfg->type == SPR) {
		log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
			     log0, log1, log2a, log3, log4, log5);

			lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
			n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
				      lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
			n += snprintf(msg + n, len - n, "]");

		log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
			     log0, log1, log2, log3, log4, log5);

			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
		corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
		corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
		corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
		corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);

	snprintf(msg + n, len - n,
		 " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
		 corr0 & 0xffff, corr0 >> 16,
		 corr1 & 0xffff, corr1 >> 16,
		 corr2 & 0xffff, corr2 >> 16,
		 corr3 & 0xffff, corr3 >> 16);

	/* Clear status bits */
	if (retry_rd_err_log == 2) {
		if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
			log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, offsets[0], log0);

		if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
			lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
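/*
 * Find a PCI device by segment/bus/device/function and enable it,
 * returning NULL if the device is absent or cannot be enabled.
 */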
static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
		edac_dbg(2, "No device %02x:%02x.%x\n",

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
static bool i10nm_check_2lm(struct res_config *cfg)
	list_for_each_entry(d, i10nm_edac_list, list) {
		d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[1],
						 PCI_SLOT(cfg->sad_all_devfn),
						 PCI_FUNC(cfg->sad_all_devfn));

		for (i = 0; i < I10NM_MAX_SAD; i++) {
			I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
			if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
				edac_dbg(2, "2-level memory configuration.\n");
/*
 * Check whether the error comes from DDRT, based on the ICX/Tremont
 * model-specific error code.  Refer to SDM vol3B 16.11.3 "Intel IMC MC
 * error codes for IA32_MCi_STATUS".
 */
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
	switch (mscod) {
	case 0x0106: case 0x0107:
	case 0x0800: case 0x0804:
	case 0x0806 ... 0x0808:
	case 0x080a ... 0x080e:
	case 0x0810: case 0x0811:
	case 0x0816: case 0x081e:
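/*
 * Decide whether this machine check can be decoded from the MCA bank
 * registers alone: decoding via MCA must be enabled, the system must
 * not be in 2LM mode, MISC/ADDR must be valid, the bank must belong to
 * an IMC, and the error must not come from DDRT.
 */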
static bool i10nm_mc_decode_available(struct mce *mce)
	if (!decoding_via_mca || mem_cfg_2lm)

	if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
	    != (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))

	switch (res_cfg->type) {
		if (bank < 13 || bank > 26)

		/* DDRT errors can't be decoded from MCA bank registers */
		if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)

		if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))

		/* Check whether one of {13,14,17,18,21,22,25,26} */
		return ((bank - 13) & BIT(1)) == 0;
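/*
 * Decode a memory error directly from the MCA bank registers: map the
 * reporting bank to socket/MC/channel, then extract column, row, bank
 * group/address, rank and DIMM from IA32_MCi_MISC.
 */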
static bool i10nm_mc_decode(struct decoded_addr *res)
	struct mce *m = res->mce;

	if (!i10nm_mc_decode_available(m))

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (d->imc[0].src_id == m->socketid) {
			res->socket = m->socketid;

	switch (res_cfg->type) {
		res->channel = bank % 2;

		skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
			   m->socketid, res->imc);

	res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
	res->row = GET_BITFIELD(m->misc, 19, 39);
	res->bank_group = GET_BITFIELD(m->misc, 40, 41);
	res->bank_address = GET_BITFIELD(m->misc, 42, 43);
	res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
	res->rank = GET_BITFIELD(m->misc, 56, 58);
	res->dimm = res->rank >> 2;
	res->rank = res->rank % 4;
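/*
 * Discover the DDR memory controllers on each socket: read the socket
 * MMIO base and each MC's offset/size from the uracu BAR registers and
 * ioremap the MC register space.
 */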
static int i10nm_get_ddr_munits(void)
	struct pci_dev *mdev;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",

		for (i = 0; i < I10NM_NUM_DDR_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");

			d->imc[i].mdev = mdev;

			if (I10NM_GET_IMC_BAR(d, i, reg)) {
				i10nm_printk(KERN_ERR, "Failed to get mc bar\n");

			off = I10NM_GET_IMC_MMIO_OFFSET(reg);
			size = I10NM_GET_IMC_MMIO_SIZE(reg);
			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",

			d->imc[i].mbase = mbase;
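/*
 * HBM support: the CAPID3 register on the pcu_cr3 device indicates
 * whether on-package HBM is present; if so, the HBM memory controllers
 * are mapped after the DDR MCs in the imc[] array.
 */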
static bool i10nm_check_hbm_imc(struct skx_dev *d)
	if (I10NM_GET_CAPID3_CFG(d, reg)) {
		i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");

	return I10NM_IS_HBM_PRESENT(reg) != 0;

static int i10nm_get_hbm_munits(void)
	struct pci_dev *mdev;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[1], 30, 3);

		if (!i10nm_check_hbm_imc(d)) {
			i10nm_printk(KERN_DEBUG, "No hbm memory\n");

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");

		base = I10NM_GET_SCK_MMIO_BASE(reg);

		if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");

		base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);

		lmc = I10NM_NUM_DDR_IMC;

		for (i = 0; i < I10NM_NUM_HBM_IMC; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i / 4, 1 + i % 4);
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No hbm mc found\n");

			d->imc[lmc].mdev = mdev;
			off = i * I10NM_HBM_IMC_MMIO_SIZE;

			edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
				 lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);

			mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",

			d->imc[lmc].mbase = mbase;
			d->imc[lmc].hbm_mc = true;

			mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
			if (!I10NM_IS_HBM_IMC(mcmtr)) {
				iounmap(d->imc[lmc].mbase);
				d->imc[lmc].mbase = NULL;
				d->imc[lmc].hbm_mc = false;
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
static struct res_config i10nm_cfg0 = {
	.busno_cfg_offset	= 0xcc,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_devfn		= PCI_DEVFN(29, 0),
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config i10nm_cfg1 = {
	.busno_cfg_offset	= 0xd0,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_devfn		= PCI_DEVFN(29, 0),
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};

static struct res_config spr_cfg = {
	.busno_cfg_offset	= 0xd0,
	.ddr_chan_mmio_sz	= 0x8000,
	.hbm_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_devfn		= PCI_DEVFN(10, 0),
	.sad_all_offset		= 0x300,
	.offsets_scrub		= offsets_scrub_spr,
	.offsets_scrub_hbm0	= offsets_scrub_spr_hbm0,
	.offsets_scrub_hbm1	= offsets_scrub_spr_hbm1,
	.offsets_demand		= offsets_demand_spr,
	.offsets_demand2	= offsets_demand2_spr,
	.offsets_demand_hbm0	= offsets_demand_spr_hbm0,
	.offsets_demand_hbm1	= offsets_demand_spr_hbm1,
};

static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
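/* ECC is enabled on a channel when bit 2 of its MCMTR register is set. */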
static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
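/*
 * Fill in the EDAC dimm_info for every channel/slot of this memory
 * controller from the DIMMMTR and MCDDRTCFG registers, covering both
 * regular DIMMs and NVDIMMs, and complain if ECC is disabled on a
 * populated channel.
 */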
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, amap, mcddrtcfg;
	struct dimm_info *dimm;

	for (i = 0; i < imc->num_channels; i++) {
		amap = I10NM_GET_AMAP(imc, i);
		mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
		for (j = 0; j < imc->num_dimms; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, amap, dimm,
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,

		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
#ifdef CONFIG_EDAC_DEBUG
/*
 * Exercise the address decode logic by writing an address to
 * /sys/kernel/debug/edac/i10nm_test/addr.
 */
static struct dentry *i10nm_test;

static int debugfs_u64_set(void *data, u64 val)
	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* One corrected error */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);

	skx_mce_check_error(NULL, 0, &m);

DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_i10nm_debug(void)
	i10nm_test = edac_debugfs_create_dir("i10nm_test");

	if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(i10nm_test);

static void teardown_i10nm_debug(void)
	debugfs_remove_recursive(i10nm_test);
#else
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/
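/*
 * Module init: match the running CPU to a res_config, enumerate the
 * sockets and memory controllers, register one EDAC MC per IMC, hook
 * into the MCE decode chain and, if requested, take over the
 * retry_rd_err_log registers.
 */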
static int __init i10nm_init(void)
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))

	id = x86_match_cpu(i10nm_cpuids);

	cfg = (struct res_config *)id->driver_data;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
		i10nm_printk(KERN_ERR, "No memory controllers found\n");

	mem_cfg_2lm = i10nm_check_2lm(cfg);
	skx_set_mem_cfg(mem_cfg_2lm);

	rc = i10nm_get_ddr_munits();

	if (i10nm_get_hbm_munits() && rc)

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);

		rc = skx_get_node_id(d, &node_id);

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			if (d->imc[i].hbm_mc) {
				d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
				d->imc[i].num_channels = I10NM_NUM_HBM_CHANNELS;
				d->imc[i].num_dimms = I10NM_NUM_HBM_DIMMS;
				d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
				d->imc[i].num_channels = I10NM_NUM_DDR_CHANNELS;
				d->imc[i].num_dimms = I10NM_NUM_DDR_DIMMS;

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);

	mce_register_decode_chain(&i10nm_mce_dec);

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(true);
		skx_set_decode(i10nm_mc_decode, NULL);

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
static void __exit i10nm_exit(void)
	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(NULL, NULL);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(false);

	teardown_i10nm_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);

module_init(i10nm_init);
module_exit(i10nm_exit);
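/*
 * The "decoding_via_mca" parameter is writable at runtime; enabling it
 * is rejected on 2LM systems, where decoding from the MCA bank
 * registers is not supported.
 */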
static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
	ret = kstrtoul(buf, 0, &val);

	if (val && mem_cfg_2lm) {
		i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");

	ret = param_set_int(buf, kp);

static const struct kernel_param_ops decoding_via_mca_param_ops = {
	.set = set_decoding_via_mca,
	.get = param_get_int,
};

module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");

module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control, resets mode bits and clears valid/UC bits after reading.)");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");