Lines Matching +full:is +full:- +full:decoded +full:- +full:cs

1 // SPDX-License-Identifier: GPL-2.0
11 #include <asm/intel-family.h>
24 pci_read_config_dword((d)->uracu, 0xd0, &(reg))
26 pci_read_config_dword((d)->uracu, \
27 (res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
29 pci_read_config_dword((d)->sad_all, (offset) + (i) * \
30 (res_cfg->type == GNR ? 12 : 8), &(reg))
32 pci_read_config_dword((d)->uracu, 0xd4, &(reg))
34 pci_read_config_dword((d)->pcu_cr3, \
35 res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
37 pci_read_config_dword((d)->pcu_cr3, \
38 res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
40 readl((m)->mbase + ((m)->hbm_mc ? 0x80c : \
41 (res_cfg->type == GNR ? 0xc0c : 0x2080c)) + \
42 (i) * (m)->chan_mmio_sz + (j) * 4)
44 readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
45 (i) * (m)->chan_mmio_sz)
47 readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \
48 (res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \
49 (i) * (m)->chan_mmio_sz)
51 readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \
52 (res_cfg->type == GNR ? 0xc14 : 0x20814)) + \
53 (i) * (m)->chan_mmio_sz)
55 readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
57 readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
59 writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
63 #define I10NM_GET_IMC_MMIO_SIZE(reg) ((GET_BITFIELD(reg, 13, 23) - \
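All register access above funnels through a few helpers: config-space reads via pci_read_config_dword(), and per-channel MMIO accesses at mbase + i * chan_mmio_sz + offset. The size macro on line 63 is plain base/limit arithmetic in 4 KiB granules; its continuation line was not matched by the search, so the sketch below completes it on the usual (limit - base + 1) << 12 assumption, with GET_BITFIELD re-created locally:

#include <stdint.h>
#include <stdio.h>

/* Local re-creation of the kernel's bitfield helpers, for illustration. */
#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GET_BITFIELD(v, lo, hi)  (((v) & GENMASK_ULL((hi), (lo))) >> (lo))

/* Assumed completion of the truncated macro: (limit - base + 1) << 12. */
#define IMC_MMIO_SIZE(reg)  ((GET_BITFIELD(reg, 13, 23) - \
			      GET_BITFIELD(reg, 0, 10) + 1) << 12)

int main(void)
{
	/* Hypothetical BAR register: base granule 0x10, limit granule 0x2f. */
	uint64_t reg = (0x2fULL << 13) | 0x10ULL;

	/* (0x2f - 0x10 + 1) << 12 = 0x20000 bytes (128 KiB). */
	printf("iMC MMIO size: 0x%llx\n", (unsigned long long)IMC_MMIO_SIZE(reg));
	return 0;
}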
115 imc->chan[chan].retry_rd_err_log_s = s; in __enable_retry_rd_err_log()
116 imc->chan[chan].retry_rd_err_log_d = d; in __enable_retry_rd_err_log()
118 imc->chan[chan].retry_rd_err_log_d2 = d2; in __enable_retry_rd_err_log()
132 if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC) in __enable_retry_rd_err_log()
134 if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER) in __enable_retry_rd_err_log()
136 if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN)) in __enable_retry_rd_err_log()
138 if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC) in __enable_retry_rd_err_log()
140 if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER) in __enable_retry_rd_err_log()
142 if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN)) in __enable_retry_rd_err_log()
146 if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC) in __enable_retry_rd_err_log()
148 if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER)) in __enable_retry_rd_err_log()
150 if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN)) in __enable_retry_rd_err_log()
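The restore half of __enable_retry_rd_err_log() shown above puts each control bit back to its saved value: UC and NOOVER are re-asserted only if they were set before logging was enabled, EN is cleared only if it was clear before, and the d2 register gets the inverted NOOVER test on line 148. A minimal sketch of the s/d restore pattern, with placeholder bit positions rather than the driver's RETRY_RD_ERR_LOG_* values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions; the driver's RETRY_RD_ERR_LOG_* may differ. */
#define LOG_UC      (1u << 1)
#define LOG_NOOVER  (1u << 14)
#define LOG_EN      (1u << 15)

/* Restore a control register to the configuration saved at enable time. */
static uint32_t restore_ctl(uint32_t saved, uint32_t live)
{
	if (saved & LOG_UC)
		live |= LOG_UC;
	if (saved & LOG_NOOVER)
		live |= LOG_NOOVER;
	if (!(saved & LOG_EN))
		live &= ~LOG_EN;
	return live;
}

int main(void)
{
	uint32_t saved = LOG_UC;              /* UC set, NOOVER/EN clear */
	uint32_t live  = LOG_EN | LOG_NOOVER; /* state while logging ran */

	/* Expect UC re-asserted and EN cleared; NOOVER is left alone. */
	printf("restored: 0x%08x\n", restore_ctl(saved, live));
	return 0;
}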
170 imc_num = res_cfg->ddr_imc_num; in enable_retry_rd_err_log()
171 chan_num = res_cfg->ddr_chan_num; in enable_retry_rd_err_log()
174 imc = &d->imc[i]; in enable_retry_rd_err_log()
175 if (!imc->mbase) in enable_retry_rd_err_log()
180 res_cfg->offsets_scrub, in enable_retry_rd_err_log()
181 res_cfg->offsets_demand, in enable_retry_rd_err_log()
182 res_cfg->offsets_demand2); in enable_retry_rd_err_log()
185 imc_num += res_cfg->hbm_imc_num; in enable_retry_rd_err_log()
186 chan_num = res_cfg->hbm_chan_num; in enable_retry_rd_err_log()
189 imc = &d->imc[i]; in enable_retry_rd_err_log()
190 if (!imc->mbase || !imc->hbm_mc) in enable_retry_rd_err_log()
195 res_cfg->offsets_scrub_hbm0, in enable_retry_rd_err_log()
196 res_cfg->offsets_demand_hbm0, in enable_retry_rd_err_log()
199 res_cfg->offsets_scrub_hbm1, in enable_retry_rd_err_log()
200 res_cfg->offsets_demand_hbm1, in enable_retry_rd_err_log()
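enable_retry_rd_err_log() walks the controllers in two passes over one imc[] array: the DDR iMCs occupy the low indices, then imc_num is bumped by hbm_imc_num and the HBM iMCs follow, each picking the hbm0 or hbm1 offset tables. A minimal sketch of that indexing, with made-up controller counts:

#include <stdio.h>

/* Made-up counts; the real values come from res_cfg. */
#define DDR_IMC_NUM 4
#define HBM_IMC_NUM 4

int main(void)
{
	int imc_num = DDR_IMC_NUM;
	int i;

	/* First pass: DDR controllers at the low indices. */
	for (i = 0; i < imc_num; i++)
		printf("imc[%d]: DDR\n", i);

	/* Second pass: HBM controllers appended after the DDR ones. */
	imc_num += HBM_IMC_NUM;
	for (; i < imc_num; i++)
		printf("imc[%d]: HBM\n", i);

	return 0;
}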
210 struct skx_imc *imc = &res->dev->imc[res->imc]; in show_retry_rd_err_log()
220 if (!imc->mbase) in show_retry_rd_err_log()
223 if (imc->hbm_mc) { in show_retry_rd_err_log()
224 pch = res->cs & 1; in show_retry_rd_err_log()
227 offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 : in show_retry_rd_err_log()
228 res_cfg->offsets_demand_hbm1; in show_retry_rd_err_log()
230 offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 : in show_retry_rd_err_log()
231 res_cfg->offsets_demand_hbm0; in show_retry_rd_err_log()
234 offsets = res_cfg->offsets_scrub; in show_retry_rd_err_log()
236 offsets = res_cfg->offsets_demand; in show_retry_rd_err_log()
237 xffsets = res_cfg->offsets_demand2; in show_retry_rd_err_log()
241 log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]); in show_retry_rd_err_log()
242 log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]); in show_retry_rd_err_log()
243 log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]); in show_retry_rd_err_log()
244 log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]); in show_retry_rd_err_log()
245 log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]); in show_retry_rd_err_log()
248 lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]); in show_retry_rd_err_log()
249 lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]); in show_retry_rd_err_log()
250 lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]); in show_retry_rd_err_log()
251 lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]); in show_retry_rd_err_log()
252 lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]); in show_retry_rd_err_log()
255 if (res_cfg->type == SPR) { in show_retry_rd_err_log()
256 log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]); in show_retry_rd_err_log()
260 if (len - n > 0) { in show_retry_rd_err_log()
262 lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]); in show_retry_rd_err_log()
263 n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]", in show_retry_rd_err_log()
266 n += snprintf(msg + n, len - n, "]"); in show_retry_rd_err_log()
270 log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]); in show_retry_rd_err_log()
275 if (imc->hbm_mc) { in show_retry_rd_err_log()
277 corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18); in show_retry_rd_err_log()
278 corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c); in show_retry_rd_err_log()
279 corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20); in show_retry_rd_err_log()
280 corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24); in show_retry_rd_err_log()
282 corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818); in show_retry_rd_err_log()
283 corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c); in show_retry_rd_err_log()
284 corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820); in show_retry_rd_err_log()
285 corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824); in show_retry_rd_err_log()
288 corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18); in show_retry_rd_err_log()
289 corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c); in show_retry_rd_err_log()
290 corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20); in show_retry_rd_err_log()
291 corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24); in show_retry_rd_err_log()
294 if (len - n > 0) in show_retry_rd_err_log()
295 snprintf(msg + n, len - n, in show_retry_rd_err_log()
306 I10NM_SET_REG32(imc, res->channel, offsets[0], log0); in show_retry_rd_err_log()
311 I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0); in show_retry_rd_err_log()
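show_retry_rd_err_log() appends to msg with the bounded-snprintf idiom visible above (n += snprintf(msg + n, len - n, ...), guarded by len - n > 0), then dumps four per-channel correctable-error count registers before writing the log registers back. Assuming each 32-bit corr register packs two 16-bit counters, a packing these EDAC drivers commonly use but which the matched lines do not confirm, the decode would look like:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumption: each 32-bit correctable-error register holds two 16-bit
 * counters. This shows the unpacking only, not the driver's exact
 * message format.
 */
static void show_corr(const char *name, uint32_t corr)
{
	printf("%s: lo=%.4x hi=%.4x\n", name, corr & 0xffff, corr >> 16);
}

int main(void)
{
	show_corr("corr0", 0x00030001);	/* hypothetical counter values */
	return 0;
}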
339 * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
344 * at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
345 * For other CPUs, the number of present DDR memory controllers is statically
346 * configured in @cfg->ddr_imc_num.
357 d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus], in i10nm_get_imc_num()
358 res_cfg->pcu_cr3_bdf.dev, in i10nm_get_imc_num()
359 res_cfg->pcu_cr3_bdf.fun); in i10nm_get_imc_num()
360 if (!d->pcu_cr3) in i10nm_get_imc_num()
376 switch (cfg->type) { in i10nm_get_imc_num()
385 return -ENODEV; in i10nm_get_imc_num()
390 return -EINVAL; in i10nm_get_imc_num()
393 if (cfg->ddr_imc_num != imc_num) { in i10nm_get_imc_num()
397 cfg->ddr_imc_num = imc_num; in i10nm_get_imc_num()
405 * is statically pre-configured in cfg->ddr_imc_num. in i10nm_get_imc_num()
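The kernel-doc above states the rule: on Granite Rapids the controller count read from hardware overwrites the static configuration, while other CPUs keep the value pre-configured in cfg->ddr_imc_num. A minimal sketch of that rule, with local stand-ins loosely mirroring the driver's names:

#include <stdio.h>

/* Local stand-ins mirroring the driver's names, for illustration only. */
enum cpu_type { SPR, GNR };
struct res_config { enum cpu_type type; int ddr_imc_num; };

static int get_imc_num(struct res_config *cfg, int hw_imc_num)
{
	if (cfg->type != GNR)
		return cfg->ddr_imc_num;	/* statically configured */

	if (cfg->ddr_imc_num != hw_imc_num)
		cfg->ddr_imc_num = hw_imc_num;	/* runtime value wins */

	return cfg->ddr_imc_num;
}

int main(void)
{
	struct res_config cfg = { .type = GNR, .ddr_imc_num = 12 };

	printf("present DDR iMCs: %d\n", get_imc_num(&cfg, 8));
	return 0;
}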
418 d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus], in i10nm_check_2lm()
419 res_cfg->sad_all_bdf.dev, in i10nm_check_2lm()
420 res_cfg->sad_all_bdf.fun); in i10nm_check_2lm()
421 if (!d->sad_all) in i10nm_check_2lm()
425 I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg); in i10nm_check_2lm()
427 edac_dbg(2, "2-level memory configuration.\n"); in i10nm_check_2lm()
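The loop above reads each SAD rule through I10NM_GET_SAD() and, per the debug message, flags a 2-level memory configuration once a qualifying rule is found. A minimal sketch of that scan, with placeholder bit tests standing in for the driver's I10NM_SAD_ENABLE()/I10NM_SAD_NM_CACHEABLE() helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions standing in for the driver's SAD helpers. */
#define SAD_ENABLE(reg)		((reg) & 0x1)
#define SAD_NM_CACHE(reg)	(((reg) >> 5) & 0x1)

/* Report 2LM if any enabled SAD rule is near-memory cacheable. */
static bool check_2lm(const uint32_t *sad, int n)
{
	for (int i = 0; i < n; i++)
		if (SAD_ENABLE(sad[i]) && SAD_NM_CACHE(sad[i]))
			return true;
	return false;
}

int main(void)
{
	uint32_t sad[] = { 0x0, 0x21 };	/* second rule: enabled + NM-cacheable */

	printf("2LM: %d\n", check_2lm(sad, 2));
	return 0;
}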
442 switch (res_cfg->type) { in i10nm_mscod_is_ddrt()
483 if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV)) in i10nm_mc_decode_available()
487 bank = mce->bank; in i10nm_mc_decode_available()
489 switch (res_cfg->type) { in i10nm_mc_decode_available()
491 /* Check whether the bank is one of {13,14,17,18,21,22,25,26} */ in i10nm_mc_decode_available()
503 /* DDRT errors can't be decoded from MCA bank registers */ in i10nm_mc_decode_available()
504 if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT) in i10nm_mc_decode_available()
507 if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status))) in i10nm_mc_decode_available()
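A record is only decodable here if both MISCV and ADDRV are set, the error is not DDRT, and the bank belongs to the iMC set named in the comment on line 491. One way to express that membership test is a bitmask of the valid bank numbers; this is equivalent in effect to the driver's check, though not its literal code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Membership test for MCA banks {13,14,17,18,21,22,25,26}. */
static bool bank_is_imc(unsigned int bank)
{
	const uint32_t valid = (1u << 13) | (1u << 14) | (1u << 17) | (1u << 18) |
			       (1u << 21) | (1u << 22) | (1u << 25) | (1u << 26);

	return bank < 32 && (valid & (1u << bank));
}

int main(void)
{
	printf("bank 14: %d, bank 15: %d\n", bank_is_imc(14), bank_is_imc(15));
	return 0;
}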
515 struct mce *m = res->mce; in i10nm_mc_decode()
523 if (d->imc[0].src_id == m->socketid) { in i10nm_mc_decode()
524 res->socket = m->socketid; in i10nm_mc_decode()
525 res->dev = d; in i10nm_mc_decode()
530 switch (res_cfg->type) { in i10nm_mc_decode()
532 bank = m->bank - 13; in i10nm_mc_decode()
533 res->imc = bank / 4; in i10nm_mc_decode()
534 res->channel = bank % 2; in i10nm_mc_decode()
535 res->column = GET_BITFIELD(m->misc, 9, 18) << 2; in i10nm_mc_decode()
536 res->row = GET_BITFIELD(m->misc, 19, 39); in i10nm_mc_decode()
537 res->bank_group = GET_BITFIELD(m->misc, 40, 41); in i10nm_mc_decode()
538 res->bank_address = GET_BITFIELD(m->misc, 42, 43); in i10nm_mc_decode()
539 res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2; in i10nm_mc_decode()
540 res->rank = GET_BITFIELD(m->misc, 56, 58); in i10nm_mc_decode()
541 res->dimm = res->rank >> 2; in i10nm_mc_decode()
542 res->rank = res->rank % 4; in i10nm_mc_decode()
545 bank = m->bank - 13; in i10nm_mc_decode()
546 res->imc = bank / 2; in i10nm_mc_decode()
547 res->channel = bank % 2; in i10nm_mc_decode()
548 res->column = GET_BITFIELD(m->misc, 9, 18) << 2; in i10nm_mc_decode()
549 res->row = GET_BITFIELD(m->misc, 19, 36); in i10nm_mc_decode()
550 res->bank_group = GET_BITFIELD(m->misc, 37, 38); in i10nm_mc_decode()
551 res->bank_address = GET_BITFIELD(m->misc, 39, 40); in i10nm_mc_decode()
552 res->bank_group |= GET_BITFIELD(m->misc, 41, 41) << 2; in i10nm_mc_decode()
553 res->rank = GET_BITFIELD(m->misc, 57, 57); in i10nm_mc_decode()
554 res->dimm = GET_BITFIELD(m->misc, 58, 58); in i10nm_mc_decode()
560 if (!res->dev) { in i10nm_mc_decode()
562 m->socketid, res->imc); in i10nm_mc_decode()
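i10nm_mc_decode() recovers the DIMM geometry purely from MCA registers: the bank number minus 13 (where the iMC banks start) yields the controller and channel, and bitfields of MCi_MISC carry column, row, bank group, rank, and DIMM. A worked example on a fabricated record, using the second decode variant shown above (bank/2 split, row in misc bits 19-36):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GET_BITFIELD(v, lo, hi)  (((v) & GENMASK_ULL((hi), (lo))) >> (lo))

int main(void)
{
	/* Fabricated MCA record: bank 18, synthetic MCi_MISC payload. */
	unsigned int bank = 18 - 13;	/* iMC banks start at bank 13 */
	uint64_t misc = (0x5ULL << 9) | (0x1c3ULL << 19);

	/* Field boundaries follow the second decode variant above. */
	printf("imc=%u channel=%u\n", bank / 2, bank % 2);
	printf("column=%llu row=%llu\n",
	       (unsigned long long)(GET_BITFIELD(misc, 9, 18) << 2),
	       (unsigned long long)GET_BITFIELD(misc, 19, 36));
	return 0;
}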
570 * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
573 * @logical_idx : The logical index of the present memory controller (0 ~ max present MC# - 1).
576 * RETURNS : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
586 * Detect present memory controllers from { PCI device: 8-5, function 7-1 } in get_gnr_mdev()
589 mdev = pci_get_dev_wrapper(d->seg, in get_gnr_mdev()
590 d->bus[res_cfg->ddr_mdev_bdf.bus], in get_gnr_mdev()
591 res_cfg->ddr_mdev_bdf.dev + i / 7, in get_gnr_mdev()
592 res_cfg->ddr_mdev_bdf.fun + i % 7); in get_gnr_mdev()
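The BDF arithmetic above packs consecutive logical controllers across PCI functions first, then devices: controller i lives at device base + i/7, function base + i%7, walking seven functions per device, consistent with the { PCI device: 8-5, function 7-1 } comment. A quick sketch of the mapping, with placeholder base numbers rather than the driver's BDF table:

#include <stdio.h>

/* Placeholder base values; real ones come from res_cfg->ddr_mdev_bdf. */
#define BASE_DEV 5
#define BASE_FUN 1

int main(void)
{
	/* Logical controller i -> device BASE_DEV + i/7, function BASE_FUN + i%7. */
	for (int i = 0; i < 12; i++)
		printf("MC%-2d -> dev %d fun %d\n",
		       i, BASE_DEV + i / 7, BASE_FUN + i % 7);
	return 0;
}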
609 * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
613 * @offset : To store the MMIO offset of the i-th DDR memory controller.
614 * @size : To store the MMIO size of the i-th DDR memory controller.
616 * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
624 switch (res_cfg->type) { in get_ddr_munit()
647 mdev = pci_get_dev_wrapper(d->seg, in get_ddr_munit()
648 d->bus[res_cfg->ddr_mdev_bdf.bus], in get_ddr_munit()
649 res_cfg->ddr_mdev_bdf.dev + i, in get_ddr_munit()
650 res_cfg->ddr_mdev_bdf.fun); in get_ddr_munit()
662 * i10nm_imc_absent() - Check whether the memory controller @imc is absent
666 * RETURNS : true if the memory controller EDAC device is absent, false otherwise.
673 switch (res_cfg->type) { in i10nm_imc_absent()
675 for (i = 0; i < res_cfg->ddr_chan_num; i++) { in i10nm_imc_absent()
708 d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus], in i10nm_get_ddr_munits()
709 res_cfg->util_all_bdf.dev, in i10nm_get_ddr_munits()
710 res_cfg->util_all_bdf.fun); in i10nm_get_ddr_munits()
711 if (!d->util_all) in i10nm_get_ddr_munits()
712 return -ENODEV; in i10nm_get_ddr_munits()
714 d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus], in i10nm_get_ddr_munits()
715 res_cfg->uracu_bdf.dev, in i10nm_get_ddr_munits()
716 res_cfg->uracu_bdf.fun); in i10nm_get_ddr_munits()
717 if (!d->uracu) in i10nm_get_ddr_munits()
718 return -ENODEV; in i10nm_get_ddr_munits()
722 return -ENODEV; in i10nm_get_ddr_munits()
729 for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) { in i10nm_get_ddr_munits()
734 return -ENODEV; in i10nm_get_ddr_munits()
746 return -ENODEV; in i10nm_get_ddr_munits()
749 d->imc[lmc].mbase = mbase; in i10nm_get_ddr_munits()
750 if (i10nm_imc_absent(&d->imc[lmc])) { in i10nm_get_ddr_munits()
753 d->imc[lmc].mbase = NULL; in i10nm_get_ddr_munits()
757 d->imc[lmc].mdev = mdev; in i10nm_get_ddr_munits()
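In i10nm_get_ddr_munits() above, two indices run side by side: i scans every possible controller slot, while lmc counts only the present ones, so absent slots (detected via i10nm_imc_absent(), which unmaps and NULLs mbase) leave no holes in d->imc[]. A minimal sketch of that skip-and-compact loop, with a stub standing in for the ioremap-and-probe steps:

#include <stdbool.h>
#include <stdio.h>

/* Stub for the driver's ioremap + i10nm_imc_absent() probe. */
static bool imc_absent(int i)
{
	return i == 1;	/* pretend slot 1 is unpopulated */
}

int main(void)
{
	int lmc = 0;

	for (int i = 0; i < 4; i++) {
		if (imc_absent(i)) {
			printf("mc%d: absent, skipped\n", i);
			continue;	/* lmc does not advance */
		}
		printf("mc%d -> logical mc%d\n", i, lmc++);
	}
	return 0;
}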
788 if (!d->pcu_cr3) in i10nm_get_hbm_munits()
789 return -ENODEV; in i10nm_get_hbm_munits()
793 return -ENODEV; in i10nm_get_hbm_munits()
798 return -ENODEV; in i10nm_get_hbm_munits()
804 return -ENODEV; in i10nm_get_hbm_munits()
808 lmc = res_cfg->ddr_imc_num; in i10nm_get_hbm_munits()
810 for (i = 0; i < res_cfg->hbm_imc_num; i++) { in i10nm_get_hbm_munits()
811 mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus], in i10nm_get_hbm_munits()
812 res_cfg->hbm_mdev_bdf.dev + i / 4, in i10nm_get_hbm_munits()
813 res_cfg->hbm_mdev_bdf.fun + i % 4); in i10nm_get_hbm_munits()
817 return -ENODEV; in i10nm_get_hbm_munits()
822 d->imc[lmc].mdev = mdev; in i10nm_get_hbm_munits()
830 pci_dev_put(d->imc[lmc].mdev); in i10nm_get_hbm_munits()
831 d->imc[lmc].mdev = NULL; in i10nm_get_hbm_munits()
835 return -ENOMEM; in i10nm_get_hbm_munits()
838 d->imc[lmc].mbase = mbase; in i10nm_get_hbm_munits()
839 d->imc[lmc].hbm_mc = true; in i10nm_get_hbm_munits()
841 mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0); in i10nm_get_hbm_munits()
843 iounmap(d->imc[lmc].mbase); in i10nm_get_hbm_munits()
844 d->imc[lmc].mbase = NULL; in i10nm_get_hbm_munits()
845 d->imc[lmc].hbm_mc = false; in i10nm_get_hbm_munits()
846 pci_dev_put(d->imc[lmc].mdev); in i10nm_get_hbm_munits()
847 d->imc[lmc].mdev = NULL; in i10nm_get_hbm_munits()
850 return -ENODEV; in i10nm_get_hbm_munits()
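The HBM error paths above unwind strictly in reverse order of acquisition: iounmap the mapping, clear hbm_mc, drop the PCI reference with pci_dev_put(), and NULL the stale pointers before returning. A pattern-only sketch, with malloc/free standing in for ioremap/iounmap and the PCI refcount:

#include <stdio.h>
#include <stdlib.h>

struct imc { void *mbase; void *mdev; int hbm_mc; };

/* Acquire in order, release in reverse on failure; stand-ins only. */
static int probe_hbm(struct imc *imc, int fail_check)
{
	imc->mdev = malloc(1);		/* ~ pci_get_dev_wrapper() */
	if (!imc->mdev)
		return -1;

	imc->mbase = malloc(1);		/* ~ ioremap() */
	if (!imc->mbase)
		goto put_dev;

	imc->hbm_mc = 1;
	if (fail_check)			/* ~ MCMTR sanity check failing */
		goto unmap;

	return 0;

unmap:
	free(imc->mbase);		/* ~ iounmap() */
	imc->mbase = NULL;
	imc->hbm_mc = 0;
put_dev:
	free(imc->mdev);		/* ~ pci_dev_put() */
	imc->mdev = NULL;
	return -1;
}

int main(void)
{
	struct imc imc = { 0 };

	printf("probe: %d, mdev=%p\n", probe_hbm(&imc, 1), imc.mdev);
	return 0;
}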
971 struct skx_pvt *pvt = mci->pvt_info; in i10nm_get_dimm_config()
972 struct skx_imc *imc = pvt->imc; in i10nm_get_dimm_config()
977 for (i = 0; i < imc->num_channels; i++) { in i10nm_get_dimm_config()
978 if (!imc->mbase) in i10nm_get_dimm_config()
984 if (res_cfg->type != GNR) in i10nm_get_dimm_config()
987 for (j = 0; j < imc->num_dimms; j++) { in i10nm_get_dimm_config()
991 mtr, mcddrtcfg, imc->mc, i, j); in i10nm_get_dimm_config()
1001 i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n", in i10nm_get_dimm_config()
1002 imc->mc, i); in i10nm_get_dimm_config()
1003 return -ENODEV; in i10nm_get_dimm_config()
1077 return -EBUSY; in i10nm_init()
1081 return -EBUSY; in i10nm_init()
1084 return -ENODEV; in i10nm_init()
1088 return -ENODEV; in i10nm_init()
1090 cfg = (struct res_config *)id->driver_data; in i10nm_init()
1102 return -ENODEV; in i10nm_init()
1117 imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num; in i10nm_init()
1130 if (!d->imc[i].mdev) in i10nm_init()
1133 d->imc[i].mc = mc++; in i10nm_init()
1134 d->imc[i].lmc = i; in i10nm_init()
1135 d->imc[i].src_id = src_id; in i10nm_init()
1136 d->imc[i].node_id = node_id; in i10nm_init()
1137 if (d->imc[i].hbm_mc) { in i10nm_init()
1138 d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz; in i10nm_init()
1139 d->imc[i].num_channels = cfg->hbm_chan_num; in i10nm_init()
1140 d->imc[i].num_dimms = cfg->hbm_dimm_num; in i10nm_init()
1142 d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz; in i10nm_init()
1143 d->imc[i].num_channels = cfg->ddr_chan_num; in i10nm_init()
1144 d->imc[i].num_dimms = cfg->ddr_dimm_num; in i10nm_init()
1147 rc = skx_register_mci(&d->imc[i], d->imc[i].mdev, in i10nm_init()
1163 if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) { in i10nm_init()
1183 if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) { in i10nm_exit()
1206 return -EINVAL; in set_decoding_via_mca()
1210 return -EIO; in set_decoding_via_mca()