Lines Matching +full:scrubber +full:- +full:done
1 // SPDX-License-Identifier: GPL-2.0-only
9 * cleared to prevent re-enabling the hardware by this driver.
18 if (!pvt->flags.zn_regs_v2) in get_umc_reg()
31 /* Per-node stuff */
38 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
39 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
81 func, PCI_FUNC(pdev->devfn), offset); in __amd64_read_pci_cfg_dword()
94 func, PCI_FUNC(pdev->devfn), offset); in __amd64_write_pci_cfg_dword()
106 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg); in f15h_select_dct()
107 reg &= (pvt->model == 0x30) ? ~3 : ~1; in f15h_select_dct()
109 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); in f15h_select_dct()
119 * DCT0 -> F2x040..
120 * DCT1 -> F2x140..
129 switch (pvt->fam) { in amd64_read_dct_pci_cfg()
132 return -EINVAL; in amd64_read_dct_pci_cfg()
154 dct = (dct && pvt->model == 0x30) ? 3 : dct; in amd64_read_dct_pci_cfg()
160 return -EINVAL; in amd64_read_dct_pci_cfg()
166 return amd64_read_pci_cfg(pvt->F2, offset, val); in amd64_read_dct_pci_cfg()
170 * Memory scrubber control interface. For K8, memory scrubbing is handled by
179 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
201 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { in __set_scrub_rate()
215 if (pvt->fam == 0x15 && pvt->model == 0x60) { in __set_scrub_rate()
217 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
219 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
221 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F); in __set_scrub_rate()
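A minimal userspace sketch of the 'matching-or-higher' table search that __set_scrub_rate() performs before the register writes above (table abridged; the bandwidth values and the register writes themselves live in the driver):

	#include <stdint.h>
	#include <stddef.h>

	struct scrubrate {
		uint32_t scrubval;	/* bit pattern for the scrub control field */
		uint32_t bandwidth;	/* scrub rate in bytes/sec that pattern selects */
	};

	/* Sorted highest to lowest bandwidth; { 0x00, 0 } terminates (scrub off). */
	static const struct scrubrate scrubrates[] = {
		{ 0x01, 1600000000UL },
		{ 0x02,  800000000UL },
		{ 0x03,  400000000UL },
		{ 0x00,           0UL },
	};

	static uint32_t pick_scrubval(uint32_t new_bw)
	{
		size_t i;

		/* Stop at the first entry whose bandwidth does not exceed the
		 * request; if none matches, the loop lands on the 'off' sentinel. */
		for (i = 0; i < sizeof(scrubrates) / sizeof(scrubrates[0]) - 1; i++)
			if (scrubrates[i].bandwidth <= new_bw)
				break;

		return scrubrates[i].scrubval;
	}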
232 struct amd64_pvt *pvt = mci->pvt_info; in set_scrub_rate()
235 if (pvt->fam == 0xf) in set_scrub_rate()
238 if (pvt->fam == 0x15) { in set_scrub_rate()
240 if (pvt->model < 0x10) in set_scrub_rate()
243 if (pvt->model == 0x60) in set_scrub_rate()
251 struct amd64_pvt *pvt = mci->pvt_info; in get_scrub_rate()
252 int i, retval = -EINVAL; in get_scrub_rate()
255 if (pvt->fam == 0x15) { in get_scrub_rate()
257 if (pvt->model < 0x10) in get_scrub_rate()
260 if (pvt->model == 0x60) in get_scrub_rate()
261 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); in get_scrub_rate()
263 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); in get_scrub_rate()
265 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); in get_scrub_rate()
287 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be in base_limit_match()
289 * Here we discard bits 63-40. See section 3.4.2 of AMD publication in base_limit_match()
290 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 in base_limit_match()
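A standalone sketch of the comparison this comment describes, assuming base and limit come from the node's DRAM base/limit registers:

	#include <stdint.h>
	#include <stdbool.h>

	static bool base_limit_match_sketch(uint64_t base, uint64_t limit,
					    uint64_t sys_addr)
	{
		/* Discard bits 63-40: the K8 compares only a 40-bit address. */
		uint64_t addr = sys_addr & ((1ULL << 40) - 1);

		return addr >= base && addr <= limit;
	}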
316 pvt = mci->pvt_info; in find_mc_by_sys_addr()
378 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { in get_cs_base_and_mask()
379 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
380 csmask = pvt->csels[dct].csmasks[csrow]; in get_cs_base_and_mask()
389 } else if (pvt->fam == 0x16 || in get_cs_base_and_mask()
390 (pvt->fam == 0x15 && pvt->model >= 0x30)) { in get_cs_base_and_mask()
391 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
392 csmask = pvt->csels[dct].csmasks[csrow >> 1]; in get_cs_base_and_mask()
407 csbase = pvt->csels[dct].csbases[csrow]; in get_cs_base_and_mask()
408 csmask = pvt->csels[dct].csmasks[csrow >> 1]; in get_cs_base_and_mask()
411 if (pvt->fam == 0x15) in get_cs_base_and_mask()
429 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
432 pvt->csels[dct].csbases[i]
435 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
438 for (i = 0; i < pvt->max_mcs; i++)
442 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
450 pvt = mci->pvt_info; in input_addr_to_csrow()
463 pvt->mc_node_id); in input_addr_to_csrow()
469 (unsigned long)input_addr, pvt->mc_node_id); in input_addr_to_csrow()
471 return -1; in input_addr_to_csrow()
480 * - The revision of the node is not E or greater. In this case, the DRAM Hole
483 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
487 * complete 32-bit values despite the fact that the bitfields in the DHAR
488 * only represent bits 31-24 of the base and offset values.
493 struct amd64_pvt *pvt = mci->pvt_info; in get_dram_hole_info()
496 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) { in get_dram_hole_info()
498 pvt->ext_model, pvt->mc_node_id); in get_dram_hole_info()
503 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) { in get_dram_hole_info()
510 pvt->mc_node_id); in get_dram_hole_info()
516 /* +------------------+--------------------+--------------------+----- in get_dram_hole_info()
518 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | in get_dram_hole_info()
522 * | | | (0xffffffff-x))] | in get_dram_hole_info()
523 * +------------------+--------------------+--------------------+----- in get_dram_hole_info()
533 *hole_size = (1ULL << 32) - *hole_base; in get_dram_hole_info()
535 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt) in get_dram_hole_info()
539 pvt->mc_node_id, (unsigned long)*hole_base, in get_dram_hole_info()
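A sketch of deriving the three hole values from a K8-style DHAR, assuming the bits 31-24 field layout the comment above describes (the field masks here are illustrative, not the driver's exact macros):

	#include <stdint.h>

	static void dhar_to_hole(uint32_t dhar, uint64_t *hole_base,
				 uint64_t *hole_offset, uint64_t *hole_size)
	{
		*hole_base   = dhar & 0xff000000;		/* DramHoleBase, bits 31-24 */
		*hole_offset = (uint64_t)(dhar & 0x0000ff00) << 16; /* DramHoleOffset field */
		*hole_size   = (1ULL << 32) - *hole_base;	/* the hole always ends at 4GB */
	}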
551 struct amd64_pvt *pvt = mci->pvt_info; \
553 return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
602 struct amd64_pvt *pvt = mci->pvt_info; in inject_section_show()
603 return sprintf(buf, "0x%x\n", pvt->injection.section); in inject_section_show()
607 * store error injection section value which refers to one of 4 16-byte sections
608 * within a 64-byte cacheline
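Given the 4-sections-per-cacheline layout above, the store path only needs a bounds check; a trivial sketch:

	static inline int valid_inject_section(unsigned long value)
	{
		return value <= 3;	/* sections 0-3 of the 64-byte cacheline */
	}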
617 struct amd64_pvt *pvt = mci->pvt_info; in inject_section_store()
627 return -EINVAL; in inject_section_store()
630 pvt->injection.section = (u32) value; in inject_section_store()
638 struct amd64_pvt *pvt = mci->pvt_info; in inject_word_show()
639 return sprintf(buf, "0x%x\n", pvt->injection.word); in inject_word_show()
643 * store error injection word value which refers to one of the 9 16-bit words of the in inject_word_store()
644 * 16-byte (128-bit + ECC bits) section
653 struct amd64_pvt *pvt = mci->pvt_info; in inject_word_store()
663 return -EINVAL; in inject_word_store()
666 pvt->injection.word = (u32) value; in inject_word_store()
675 struct amd64_pvt *pvt = mci->pvt_info; in inject_ecc_vector_show()
676 return sprintf(buf, "0x%x\n", pvt->injection.bit_map); in inject_ecc_vector_show()
689 struct amd64_pvt *pvt = mci->pvt_info; in inject_ecc_vector_store()
699 return -EINVAL; in inject_ecc_vector_store()
702 pvt->injection.bit_map = (u32) value; in inject_ecc_vector_store()
715 struct amd64_pvt *pvt = mci->pvt_info; in inject_read_store()
724 /* Form value to choose 16-byte section of cacheline */ in inject_read_store()
725 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); in inject_read_store()
727 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); in inject_read_store()
729 word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection); in inject_read_store()
732 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); in inject_read_store()
748 struct amd64_pvt *pvt = mci->pvt_info; in inject_write_store()
757 /* Form value to choose 16-byte section of cacheline */ in inject_write_store()
758 section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section); in inject_write_store()
760 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); in inject_write_store()
762 word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection); in inject_write_store()
771 amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); in inject_write_store()
775 amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp); in inject_write_store()
811 struct amd64_pvt *pvt = mci->pvt_info; in inj_is_visible()
814 if (pvt->fam >= 0x10 && pvt->fam <= 0x16) in inj_is_visible()
815 return attr->mode; in inj_is_visible()
857 struct amd64_pvt *pvt = mci->pvt_info; in sys_addr_to_dram_addr()
861 dram_base = get_dram_base(pvt, pvt->mc_node_id); in sys_addr_to_dram_addr()
868 dram_addr = sys_addr - hole_offset; in sys_addr_to_dram_addr()
880 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 in sys_addr_to_dram_addr()
881 * only deals with 40-bit values. Therefore we discard bits 63-40 of in sys_addr_to_dram_addr()
884 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture in sys_addr_to_dram_addr()
887 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base; in sys_addr_to_dram_addr()
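Putting the two branches together, a hedged sketch of the translation (hole fields as returned by a get_dram_hole_info()-style call; the parameter names are assumptions):

	#include <stdint.h>

	static uint64_t sys_to_dram_sketch(uint64_t sys_addr, uint64_t dram_base,
					   int hole_valid, uint64_t hole_offset,
					   uint64_t hole_size)
	{
		/* Addresses hoisted above the 4GB hole translate via the DHAR offset. */
		if (hole_valid && sys_addr >= (1ULL << 32) &&
		    sys_addr < (1ULL << 32) + hole_size)
			return sys_addr - hole_offset;

		/* Otherwise discard bits 63-40 and subtract the node's DRAM base. */
		return (sys_addr & ((1ULL << 40) - 1)) - dram_base;
	}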
916 pvt = mci->pvt_info; in dram_addr_to_input_addr()
919 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) in dram_addr_to_input_addr()
954 err->page = (u32) (error_address >> PAGE_SHIFT); in error_address_to_page_and_offset()
955 err->offset = ((u32) error_address) & ~PAGE_MASK; in error_address_to_page_and_offset()
963 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
972 if (csrow == -1) in sys_addr_to_csrow()
1006 * Mapping of nodes from hardware-provided AMD Node ID to a in gpu_get_node_map()
1010 if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3) in gpu_get_node_map()
1014 * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID in gpu_get_node_map()
1022 ret = -ENODEV; in gpu_get_node_map()
1041 u8 nid = (m->ipid >> 44) & 0xF; in fixup_node_id()
1043 if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2) in fixup_node_id()
1050 /* Convert the hardware-provided AMD Node ID to a Linux logical one. */ in fixup_node_id()
1051 return nid - gpu_node_map.base_node_id + 1; in fixup_node_id()
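Worked example of the conversion above, with a hypothetical base_node_id of 8:

	/* base_node_id == 8 (assumed): hw nid 8 -> logical 1, 9 -> 2, 12 -> 5;
	 * logical node 0 stays reserved for the CPUs, as noted above. */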
1074 int err = -ENODEV; in __df_indirect_read()
1079 F4 = node_to_amd_nb(node)->link; in __df_indirect_read()
1155 ctx.ret_addr -= hi_addr_offset; in umc_normaddr_to_sysaddr()
1193 /* Re-use intlv_num_chan by setting it equal to log2(#channels) */ in umc_normaddr_to_sysaddr()
1249 cs_mask = (1 << die_id_bit) - 1; in umc_normaddr_to_sysaddr()
1278 * The pre-interleaved address consists of XXXXXXIIIYYYYY in umc_normaddr_to_sysaddr()
1280 * address bits from the post-interleaved address. in umc_normaddr_to_sysaddr()
1285 temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0); in umc_normaddr_to_sysaddr()
1301 ctx.ret_addr += (BIT_ULL(32) - dram_hole_base); in umc_normaddr_to_sysaddr()
1305 /* Save some parentheses and grab ls-bit at the end. */ in umc_normaddr_to_sysaddr()
1326 return -EINVAL; in umc_normaddr_to_sysaddr()
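A sketch of the XXXXXXIIIYYYYY reassembly described above: keep Y below the interleave bit, shift X up by the number of interleave bits, and splice the CS id in as III (the parameter names are assumptions):

	#include <stdint.h>

	static uint64_t splice_intlv_bits(uint64_t post_addr, unsigned int intlv_bit,
					  unsigned int num_intlv_bits, uint64_t cs_id)
	{
		uint64_t y = post_addr & ((1ULL << intlv_bit) - 1);	/* YYYYY */
		uint64_t x = post_addr >> intlv_bit;			/* XXXXXX */

		return (x << (intlv_bit + num_intlv_bits)) |	/* X, moved up */
		       (cs_id << intlv_bit) |			/* III, inserted */
		       y;					/* Y, unchanged */
	}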
1340 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F) in dct_determine_edac_cap()
1344 if (pvt->dclr0 & BIT(bit)) in dct_determine_edac_cap()
1356 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT)) in umc_determine_edac_cap()
1362 if (pvt->umc[i].umc_cfg & BIT(12)) in umc_determine_edac_cap()
1378 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; in dct_debug_display_dimm_sizes()
1379 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; in dct_debug_display_dimm_sizes()
1382 if (pvt->fam == 0xf) { in dct_debug_display_dimm_sizes()
1384 if (pvt->ext_model < K8_REV_F) in dct_debug_display_dimm_sizes()
1390 if (pvt->fam == 0x10) { in dct_debug_display_dimm_sizes()
1391 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 in dct_debug_display_dimm_sizes()
1392 : pvt->dbam0; in dct_debug_display_dimm_sizes()
1394 pvt->csels[1].csbases : in dct_debug_display_dimm_sizes()
1395 pvt->csels[0].csbases; in dct_debug_display_dimm_sizes()
1397 dbam = pvt->dbam0; in dct_debug_display_dimm_sizes()
1398 dcsb = pvt->csels[1].csbases; in dct_debug_display_dimm_sizes()
1415 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, in dct_debug_display_dimm_sizes()
1421 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, in dct_debug_display_dimm_sizes()
1436 if (pvt->dram_type == MEM_LRDDR3) { in debug_dump_dramcfg_low()
1437 u32 dcsm = pvt->csels[chan].csmasks[0]; in debug_dump_dramcfg_low()
1453 if (pvt->fam == 0x10) in debug_dump_dramcfg_low()
1484 /* Asymmetric dual-rank DIMM support. */ in umc_get_cs_mode()
1497 pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) { in umc_get_cs_mode()
1523 msb = fls(addr_mask_orig) - 1; in __addr_mask_to_cs_size()
1525 num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE); in __addr_mask_to_cs_size()
1528 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1); in __addr_mask_to_cs_size()
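The lines above recover a contiguous mask from an interleaved one: find the mask's top bit, count how many bits below it are zero (those are the interleave holes), then rebuild a mask of the same weight over bits [msb-holes:1]. A standalone sketch, omitting the driver's CS_3R_INTERLEAVE adjustment:

	#include <stdint.h>

	static unsigned int popcount64(uint64_t v)
	{
		unsigned int n = 0;

		for (; v; v >>= 1)
			n += v & 1;
		return n;
	}

	static uint64_t deinterleave_mask(uint64_t addr_mask)
	{
		int msb = 63;
		unsigned int num_zero_bits;

		while (msb > 0 && !(addr_mask & (1ULL << msb)))
			msb--;					/* fls(addr_mask) - 1 */

		num_zero_bits = msb - popcount64(addr_mask);	/* holes in [msb:1] */

		/* GENMASK_ULL(msb - num_zero_bits, 1): same weight, no holes. */
		return ((1ULL << (msb - num_zero_bits)) - 1) << 1;
	}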
1564 * CS0 and CS1 -> MASK0 / DIMM0 in umc_addr_mask_to_cs_size()
1565 * CS2 and CS3 -> MASK1 / DIMM1 in umc_addr_mask_to_cs_size()
1570 * CS0 -> MASK0 -> DIMM0 in umc_addr_mask_to_cs_size()
1571 * CS1 -> MASK1 -> DIMM0 in umc_addr_mask_to_cs_size()
1572 * CS2 -> MASK2 -> DIMM1 in umc_addr_mask_to_cs_size()
1573 * CS3 -> MASK3 -> DIMM1 in umc_addr_mask_to_cs_size()
1580 if (!pvt->flags.zn_regs_v2) in umc_addr_mask_to_cs_size()
1583 /* Asymmetric dual-rank DIMM support. */ in umc_addr_mask_to_cs_size()
1585 addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; in umc_addr_mask_to_cs_size()
1587 addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; in umc_addr_mask_to_cs_size()
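The CS-to-mask mapping in the list above reduces to a single shift; a sketch:

	static inline unsigned int cs_to_mask_nr(unsigned int csrow_nr, int zn_regs_v2)
	{
		/* Pre-zn_regs_v2, each mask covers a CS pair; v2 has one mask per CS. */
		return zn_regs_v2 ? csrow_nr : csrow_nr >> 1;
	}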
1620 umc = &pvt->umc[i]; in umc_dump_misc_regs()
1622 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); in umc_dump_misc_regs()
1623 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); in umc_dump_misc_regs()
1624 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); in umc_dump_misc_regs()
1625 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); in umc_dump_misc_regs()
1627 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp); in umc_dump_misc_regs()
1630 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp); in umc_dump_misc_regs()
1632 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi); in umc_dump_misc_regs()
1635 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no", in umc_dump_misc_regs()
1636 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no"); in umc_dump_misc_regs()
1638 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no"); in umc_dump_misc_regs()
1640 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no"); in umc_dump_misc_regs()
1642 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); in umc_dump_misc_regs()
1644 if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { in umc_dump_misc_regs()
1645 amd_smn_read(pvt->mc_node_id, in umc_dump_misc_regs()
1658 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); in dct_dump_misc_regs()
1661 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); in dct_dump_misc_regs()
1664 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", in dct_dump_misc_regs()
1665 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); in dct_dump_misc_regs()
1667 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0); in dct_dump_misc_regs()
1669 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); in dct_dump_misc_regs()
1672 pvt->dhar, dhar_base(pvt), in dct_dump_misc_regs()
1673 (pvt->fam == 0xf) ? k8_dhar_offset(pvt) in dct_dump_misc_regs()
1679 if (pvt->fam == 0xf) in dct_dump_misc_regs()
1686 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1); in dct_dump_misc_regs()
1690 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz); in dct_dump_misc_regs()
1698 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) { in dct_prep_chip_selects()
1699 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; in dct_prep_chip_selects()
1700 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; in dct_prep_chip_selects()
1701 } else if (pvt->fam == 0x15 && pvt->model == 0x30) { in dct_prep_chip_selects()
1702 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4; in dct_prep_chip_selects()
1703 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2; in dct_prep_chip_selects()
1705 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; in dct_prep_chip_selects()
1706 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; in dct_prep_chip_selects()
1715 pvt->csels[umc].b_cnt = 4; in umc_prep_chip_selects()
1716 pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2; in umc_prep_chip_selects()
1735 base = &pvt->csels[umc].csbases[cs]; in umc_read_base_mask()
1736 base_sec = &pvt->csels[umc].csbases_sec[cs]; in umc_read_base_mask()
1741 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) in umc_read_base_mask()
1745 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec)) in umc_read_base_mask()
1754 mask = &pvt->csels[umc].csmasks[cs]; in umc_read_base_mask()
1755 mask_sec = &pvt->csels[umc].csmasks_sec[cs]; in umc_read_base_mask()
1760 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) in umc_read_base_mask()
1764 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec)) in umc_read_base_mask()
1781 u32 *base0 = &pvt->csels[0].csbases[cs]; in dct_read_base_mask()
1782 u32 *base1 = &pvt->csels[1].csbases[cs]; in dct_read_base_mask()
1788 if (pvt->fam == 0xf) in dct_read_base_mask()
1793 cs, *base1, (pvt->fam == 0x10) ? reg1 in dct_read_base_mask()
1800 u32 *mask0 = &pvt->csels[0].csmasks[cs]; in dct_read_base_mask()
1801 u32 *mask1 = &pvt->csels[1].csmasks[cs]; in dct_read_base_mask()
1807 if (pvt->fam == 0xf) in dct_read_base_mask()
1812 cs, *mask1, (pvt->fam == 0x10) ? reg1 in dct_read_base_mask()
1823 umc = &pvt->umc[i]; in umc_determine_memory_type()
1825 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) { in umc_determine_memory_type()
1826 umc->dram_type = MEM_EMPTY; in umc_determine_memory_type()
1834 if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { in umc_determine_memory_type()
1835 if (umc->dimm_cfg & BIT(5)) in umc_determine_memory_type()
1836 umc->dram_type = MEM_LRDDR5; in umc_determine_memory_type()
1837 else if (umc->dimm_cfg & BIT(4)) in umc_determine_memory_type()
1838 umc->dram_type = MEM_RDDR5; in umc_determine_memory_type()
1840 umc->dram_type = MEM_DDR5; in umc_determine_memory_type()
1842 if (umc->dimm_cfg & BIT(5)) in umc_determine_memory_type()
1843 umc->dram_type = MEM_LRDDR4; in umc_determine_memory_type()
1844 else if (umc->dimm_cfg & BIT(4)) in umc_determine_memory_type()
1845 umc->dram_type = MEM_RDDR4; in umc_determine_memory_type()
1847 umc->dram_type = MEM_DDR4; in umc_determine_memory_type()
1850 edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]); in umc_determine_memory_type()
1858 switch (pvt->fam) { in dct_determine_memory_type()
1860 if (pvt->ext_model >= K8_REV_F) in dct_determine_memory_type()
1863 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; in dct_determine_memory_type()
1867 if (pvt->dchr0 & DDR3_MODE) in dct_determine_memory_type()
1870 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; in dct_determine_memory_type()
1874 if (pvt->model < 0x60) in dct_determine_memory_type()
1887 dcsm = pvt->csels[0].csmasks[0]; in dct_determine_memory_type()
1890 pvt->dram_type = MEM_DDR4; in dct_determine_memory_type()
1891 else if (pvt->dclr0 & BIT(16)) in dct_determine_memory_type()
1892 pvt->dram_type = MEM_DDR3; in dct_determine_memory_type()
1894 pvt->dram_type = MEM_LRDDR3; in dct_determine_memory_type()
1896 pvt->dram_type = MEM_RDDR3; in dct_determine_memory_type()
1904 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); in dct_determine_memory_type()
1905 pvt->dram_type = MEM_EMPTY; in dct_determine_memory_type()
1908 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); in dct_determine_memory_type()
1912 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; in dct_determine_memory_type()
1918 u16 mce_nid = topology_die_id(m->extcpu); in get_error_address()
1928 pvt = mci->pvt_info; in get_error_address()
1930 if (pvt->fam == 0xf) { in get_error_address()
1935 addr = m->addr & GENMASK_ULL(end_bit, start_bit); in get_error_address()
1940 if (pvt->fam == 0x15) { in get_error_address()
1949 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp); in get_error_address()
1964 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp); in get_error_address()
1988 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) && in pci_get_related_function()
1989 (dev->bus->number == related->bus->number) && in pci_get_related_function()
1990 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) in pci_get_related_function()
2005 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); in read_dram_base_limit_regs()
2006 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); in read_dram_base_limit_regs()
2008 if (pvt->fam == 0xf) in read_dram_base_limit_regs()
2014 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); in read_dram_base_limit_regs()
2015 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); in read_dram_base_limit_regs()
2018 if (pvt->fam != 0x15) in read_dram_base_limit_regs()
2025 if (pvt->model == 0x60) in read_dram_base_limit_regs()
2027 else if (pvt->model == 0x30) in read_dram_base_limit_regs()
2032 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc); in read_dram_base_limit_regs()
2038 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0); in read_dram_base_limit_regs()
2041 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16; in read_dram_base_limit_regs()
2043 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0); in read_dram_base_limit_regs()
2046 pvt->ranges[range].lim.hi |= llim >> 13; in read_dram_base_limit_regs()
2054 struct amd64_pvt *pvt = mci->pvt_info; in k8_map_sysaddr_to_csrow()
2062 err->src_mci = find_mc_by_sys_addr(mci, sys_addr); in k8_map_sysaddr_to_csrow()
2063 if (!err->src_mci) { in k8_map_sysaddr_to_csrow()
2066 err->err_code = ERR_NODE; in k8_map_sysaddr_to_csrow()
2071 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr); in k8_map_sysaddr_to_csrow()
2072 if (err->csrow < 0) { in k8_map_sysaddr_to_csrow()
2073 err->err_code = ERR_CSROW; in k8_map_sysaddr_to_csrow()
2078 if (pvt->nbcfg & NBCFG_CHIPKILL) { in k8_map_sysaddr_to_csrow()
2079 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); in k8_map_sysaddr_to_csrow()
2080 if (err->channel < 0) { in k8_map_sysaddr_to_csrow()
2086 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - " in k8_map_sysaddr_to_csrow()
2088 err->syndrome); in k8_map_sysaddr_to_csrow()
2089 err->err_code = ERR_CHANNEL; in k8_map_sysaddr_to_csrow()
2094 * non-chipkill ecc mode in k8_map_sysaddr_to_csrow()
2097 * channel number when using non-chipkill memory. This method in k8_map_sysaddr_to_csrow()
2099 * (Wish the email was placed in this comment - norsk) in k8_map_sysaddr_to_csrow()
2101 err->channel = ((sys_addr & BIT(3)) != 0); in k8_map_sysaddr_to_csrow()
2122 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; in k8_dbam_to_chip_select()
2124 if (pvt->ext_model >= K8_REV_F) { in k8_dbam_to_chip_select()
2128 else if (pvt->ext_model >= K8_REV_D) { in k8_dbam_to_chip_select()
2158 return 32 << (cs_mode - diff); in k8_dbam_to_chip_select()
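Arithmetic of the return above: each unit of (cs_mode - diff) doubles the 32 MB base, so values 0, 1, 2, 3 yield 32, 64, 128, 256 MB respectively.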
2172 cs_size = -1; in ddr3_cs_size()
2182 if (cs_size != -1) in ddr3_cs_size()
2194 cs_size = -1; in ddr3_lrdimm_cs_size()
2202 if (cs_size != -1) in ddr3_lrdimm_cs_size()
2213 cs_size = -1; in ddr4_cs_size()
2226 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; in f10_dbam_to_chip_select()
2230 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) in f10_dbam_to_chip_select()
2252 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr]; in f15_m60h_dbam_to_chip_select()
2256 if (pvt->dram_type == MEM_DDR4) { in f15_m60h_dbam_to_chip_select()
2258 return -1; in f15_m60h_dbam_to_chip_select()
2261 } else if (pvt->dram_type == MEM_LRDDR3) { in f15_m60h_dbam_to_chip_select()
2270 return -1; in f15_m60h_dbam_to_chip_select()
2288 return -1; in f16_dbam_to_chip_select()
2296 if (pvt->fam == 0xf) in read_dram_ctl_register()
2299 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) { in read_dram_ctl_register()
2301 pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); in read_dram_ctl_register()
2320 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi); in read_dram_ctl_register()
2361 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; in f1x_determine_channel()
2370 * see F2x110[DctSelIntLvAddr] - channel interleave mode in f1x_determine_channel()
2409 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16; in f1x_get_norm_dct_addr()
2446 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23)); in f1x_get_norm_dct_addr()
2475 * -EINVAL: NOT FOUND
2476 * 0..csrow = Chip-Select Row
2483 int cs_found = -EINVAL; in f1x_lookup_addr_in_dct()
2490 pvt = mci->pvt_info; in f1x_lookup_addr_in_dct()
2509 if (pvt->fam == 0x15 && pvt->model >= 0x30) { in f1x_lookup_addr_in_dct()
2523 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
2531 if (pvt->fam == 0x10) { in f1x_swap_interleaved_region()
2533 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3)) in f1x_swap_interleaved_region()
2537 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg); in f1x_swap_interleaved_region()
2560 int cs_found = -EINVAL; in f1x_match_to_this_node()
2578 return -EINVAL; in f1x_match_to_this_node()
2582 return -EINVAL; in f1x_match_to_this_node()
2640 int cs_found = -EINVAL; in f15_m30h_match_to_this_node()
2652 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg); in f15_m30h_match_to_this_node()
2653 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg); in f15_m30h_match_to_this_node()
2663 return -EINVAL; in f15_m30h_match_to_this_node()
2670 return -EINVAL; in f15_m30h_match_to_this_node()
2680 return -EINVAL; in f15_m30h_match_to_this_node()
2686 return -EINVAL; in f15_m30h_match_to_this_node()
2688 if (pvt->model >= 0x60) in f15_m30h_match_to_this_node()
2696 return -EINVAL; in f15_m30h_match_to_this_node()
2706 chan_addr = sys_addr - chan_offset; in f15_m30h_match_to_this_node()
2717 return -EINVAL; in f15_m30h_match_to_this_node()
2727 return -EINVAL; in f15_m30h_match_to_this_node()
2731 amd64_read_pci_cfg(pvt->F1, in f15_m30h_match_to_this_node()
2746 * pvt->csels[1]. So we need to use '1' here to get correct info. in f15_m30h_match_to_this_node()
2763 int cs_found = -EINVAL; in f1x_translate_sysaddr_to_cs()
2770 if (pvt->fam == 0x15 && pvt->model >= 0x30) in f1x_translate_sysaddr_to_cs()
2796 struct amd64_pvt *pvt = mci->pvt_info; in f1x_map_sysaddr_to_csrow()
2800 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel); in f1x_map_sysaddr_to_csrow()
2801 if (err->csrow < 0) { in f1x_map_sysaddr_to_csrow()
2802 err->err_code = ERR_CSROW; in f1x_map_sysaddr_to_csrow()
2812 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome); in f1x_map_sysaddr_to_csrow()
2916 return -1; in decode_syndrome()
2939 return -1; in map_err_sym_to_channel()
2947 return -1; in map_err_sym_to_channel()
2952 struct amd64_pvt *pvt = mci->pvt_info; in get_channel_from_ecc_syndrome()
2953 int err_sym = -1; in get_channel_from_ecc_syndrome()
2955 if (pvt->ecc_sym_sz == 8) in get_channel_from_ecc_syndrome()
2958 pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
2959 else if (pvt->ecc_sym_sz == 4) in get_channel_from_ecc_syndrome()
2962 pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
2964 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
2968 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); in get_channel_from_ecc_syndrome()
2988 switch (err->err_code) { in __log_ecc_error()
2999 string = "Unknown syndrome - possible error reporting race"; in __log_ecc_error()
3002 string = "MCA_SYND not valid - unknown syndrome and csrow"; in __log_ecc_error()
3013 err->page, err->offset, err->syndrome, in __log_ecc_error()
3014 err->csrow, err->channel, -1, in __log_ecc_error()
3022 u8 ecc_type = (m->status >> 45) & 0x3; in decode_bus_error()
3023 u8 xec = XEC(m->status, 0x1f); in decode_bus_error()
3024 u16 ec = EC(m->status); in decode_bus_error()
3032 pvt = mci->pvt_info; in decode_bus_error()
3047 err.syndrome = extract_syndrome(m->status); in decode_bus_error()
3049 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err); in decode_bus_error()
3068 err->channel = (m->ipid & GENMASK(31, 0)) >> 20; in umc_get_err_info()
3069 err->csrow = m->synd & 0x7; in umc_get_err_info()
3074 u8 ecc_type = (m->status >> 45) & 0x3; in decode_umc_error()
3086 pvt = mci->pvt_info; in decode_umc_error()
3090 if (m->status & MCI_STATUS_DEFERRED) in decode_umc_error()
3093 if (!(m->status & MCI_STATUS_SYNDV)) { in decode_umc_error()
3099 u8 length = (m->synd >> 18) & 0x3f; in decode_umc_error()
3102 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0); in decode_umc_error()
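Worked example of the extraction above: a reported length of 16 gives GENMASK(15, 0), so the syndrome is the low 16 bits of m->synd[63:32].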
3107 pvt->ops->get_err_info(m, &err); in decode_umc_error()
3109 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { in decode_umc_error()
3121 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
3128 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3); in reserve_mc_sibling_devs()
3129 if (!pvt->F1) { in reserve_mc_sibling_devs()
3131 return -ENODEV; in reserve_mc_sibling_devs()
3135 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3); in reserve_mc_sibling_devs()
3136 if (!pvt->F2) { in reserve_mc_sibling_devs()
3137 pci_dev_put(pvt->F1); in reserve_mc_sibling_devs()
3138 pvt->F1 = NULL; in reserve_mc_sibling_devs()
3141 return -ENODEV; in reserve_mc_sibling_devs()
3145 pci_ctl_dev = &pvt->F2->dev; in reserve_mc_sibling_devs()
3147 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); in reserve_mc_sibling_devs()
3148 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); in reserve_mc_sibling_devs()
3149 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); in reserve_mc_sibling_devs()
3156 pvt->ecc_sym_sz = 4; in determine_ecc_sym_sz()
3158 if (pvt->fam >= 0x10) { in determine_ecc_sym_sz()
3161 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); in determine_ecc_sym_sz()
3163 if (pvt->fam != 0x16) in determine_ecc_sym_sz()
3164 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1); in determine_ecc_sym_sz()
3167 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25)) in determine_ecc_sym_sz()
3168 pvt->ecc_sym_sz = 8; in determine_ecc_sym_sz()
3177 u8 nid = pvt->mc_node_id; in umc_read_mc_regs()
3185 umc = &pvt->umc[i]; in umc_read_mc_regs()
3187 amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); in umc_read_mc_regs()
3188 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); in umc_read_mc_regs()
3189 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); in umc_read_mc_regs()
3190 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); in umc_read_mc_regs()
3191 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi); in umc_read_mc_regs()
3206 * those are Read-As-Zero. in dct_read_mc_regs()
3208 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); in dct_read_mc_regs()
3209 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); in dct_read_mc_regs()
3214 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); in dct_read_mc_regs()
3215 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); in dct_read_mc_regs()
3220 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); in dct_read_mc_regs()
3241 (rw & 0x1) ? "R" : "-", in dct_read_mc_regs()
3242 (rw & 0x2) ? "W" : "-", in dct_read_mc_regs()
3247 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); in dct_read_mc_regs()
3248 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0); in dct_read_mc_regs()
3250 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); in dct_read_mc_regs()
3252 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0); in dct_read_mc_regs()
3253 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0); in dct_read_mc_regs()
3256 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1); in dct_read_mc_regs()
3257 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1); in dct_read_mc_regs()
3267 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3268 * k8 private pointer to -->
3276 * 0-3 CSROWs 0 and 1
3277 * 4-7 CSROWs 2 and 3
3278 * 8-11 CSROWs 4 and 5
3279 * 12-15 CSROWs 6 and 7
3282 * The meaning of the values depends on CPU revision and dual-channel state,
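A sketch of pulling the 4-bit field for a given csrow out of DBAM under the layout listed above (csrows pair up, one nibble per pair):

	#include <stdint.h>

	static inline unsigned int dbam_dimm_field(uint32_t dbam, unsigned int csrow_nr)
	{
		return (dbam >> (4 * (csrow_nr / 2))) & 0xF;
	}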
3299 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; in dct_get_csrow_nr_pages()
3305 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); in dct_get_csrow_nr_pages()
3306 nr_pages <<= 20 - PAGE_SHIFT; in dct_get_csrow_nr_pages()
3323 nr_pages <<= 20 - PAGE_SHIFT; in umc_get_csrow_nr_pages()
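The shift above converts the chip-select size from MB to pages, since (MB << 20) >> PAGE_SHIFT == MB << (20 - PAGE_SHIFT); with 4K pages (PAGE_SHIFT == 12) that is << 8, e.g. 2048 MB -> 2048 << 8 = 524288 pages.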
3334 struct amd64_pvt *pvt = mci->pvt_info; in umc_init_csrows()
3340 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) { in umc_init_csrows()
3343 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) { in umc_init_csrows()
3346 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) { in umc_init_csrows()
3349 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) { in umc_init_csrows()
3358 dimm = mci->csrows[cs]->channels[umc]->dimm; in umc_init_csrows()
3361 pvt->mc_node_id, cs); in umc_init_csrows()
3363 dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs); in umc_init_csrows()
3364 dimm->mtype = pvt->umc[umc].dram_type; in umc_init_csrows()
3365 dimm->edac_mode = edac_mode; in umc_init_csrows()
3366 dimm->dtype = dev_type; in umc_init_csrows()
3367 dimm->grain = 64; in umc_init_csrows()
3378 struct amd64_pvt *pvt = mci->pvt_info; in dct_init_csrows()
3386 amd64_read_pci_cfg(pvt->F3, NBCFG, &val); in dct_init_csrows()
3388 pvt->nbcfg = val; in dct_init_csrows()
3391 pvt->mc_node_id, val, in dct_init_csrows()
3401 if (pvt->fam != 0xf) in dct_init_csrows()
3407 csrow = mci->csrows[i]; in dct_init_csrows()
3410 pvt->mc_node_id, i); in dct_init_csrows()
3414 csrow->channels[0]->dimm->nr_pages = nr_pages; in dct_init_csrows()
3418 if (pvt->fam != 0xf && row_dct1) { in dct_init_csrows()
3421 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; in dct_init_csrows()
3428 if (pvt->nbcfg & NBCFG_ECC_ENABLE) { in dct_init_csrows()
3429 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) in dct_init_csrows()
3434 for (j = 0; j < pvt->max_mcs; j++) { in dct_init_csrows()
3435 dimm = csrow->channels[j]->dimm; in dct_init_csrows()
3436 dimm->mtype = pvt->dram_type; in dct_init_csrows()
3437 dimm->edac_mode = edac_mode; in dct_init_csrows()
3438 dimm->grain = 64; in dct_init_csrows()
3471 nbe = reg->l & MSR_MCGCTL_NBE; in nb_mce_bank_enabled_on_node()
3474 cpu, reg->q, in nb_mce_bank_enabled_on_node()
3494 return -ENOMEM; in toggle_ecc_err_reporting()
3506 if (reg->l & MSR_MCGCTL_NBE) in toggle_ecc_err_reporting()
3507 s->flags.nb_mce_enable = 1; in toggle_ecc_err_reporting()
3509 reg->l |= MSR_MCGCTL_NBE; in toggle_ecc_err_reporting()
3514 if (!s->flags.nb_mce_enable) in toggle_ecc_err_reporting()
3515 reg->l &= ~MSR_MCGCTL_NBE; in toggle_ecc_err_reporting()
3538 s->old_nbctl = value & mask; in enable_ecc_error_reporting()
3539 s->nbctl_valid = true; in enable_ecc_error_reporting()
3552 s->flags.nb_ecc_prev = 0; in enable_ecc_error_reporting()
3568 s->flags.nb_ecc_prev = 1; in enable_ecc_error_reporting()
3582 if (!s->nbctl_valid) in restore_ecc_error_reporting()
3587 value |= s->old_nbctl; in restore_ecc_error_reporting()
3591 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ in restore_ecc_error_reporting()
3592 if (!s->flags.nb_ecc_prev) { in restore_ecc_error_reporting()
3605 u16 nid = pvt->mc_node_id; in dct_ecc_enabled()
3610 amd64_read_pci_cfg(pvt->F3, NBCFG, &value); in dct_ecc_enabled()
3630 u16 nid = pvt->mc_node_id; in umc_ecc_enabled()
3635 umc = &pvt->umc[i]; in umc_ecc_enabled()
3638 if (!(umc->sdp_ctrl & UMC_SDP_INIT)) in umc_ecc_enabled()
3643 if (umc->umc_cap_hi & UMC_ECC_ENABLED) in umc_ecc_enabled()
3667 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { in umc_determine_edac_ctl_cap()
3668 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED); in umc_determine_edac_ctl_cap()
3669 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP); in umc_determine_edac_ctl_cap()
3671 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6)); in umc_determine_edac_ctl_cap()
3672 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7)); in umc_determine_edac_ctl_cap()
3678 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; in umc_determine_edac_ctl_cap()
3684 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; in umc_determine_edac_ctl_cap()
3686 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED; in umc_determine_edac_ctl_cap()
3688 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED; in umc_determine_edac_ctl_cap()
3694 struct amd64_pvt *pvt = mci->pvt_info; in dct_setup_mci_misc_attrs()
3696 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; in dct_setup_mci_misc_attrs()
3697 mci->edac_ctl_cap = EDAC_FLAG_NONE; in dct_setup_mci_misc_attrs()
3699 if (pvt->nbcap & NBCAP_SECDED) in dct_setup_mci_misc_attrs()
3700 mci->edac_ctl_cap |= EDAC_FLAG_SECDED; in dct_setup_mci_misc_attrs()
3702 if (pvt->nbcap & NBCAP_CHIPKILL) in dct_setup_mci_misc_attrs()
3703 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; in dct_setup_mci_misc_attrs()
3705 mci->edac_cap = dct_determine_edac_cap(pvt); in dct_setup_mci_misc_attrs()
3706 mci->mod_name = EDAC_MOD_STR; in dct_setup_mci_misc_attrs()
3707 mci->ctl_name = pvt->ctl_name; in dct_setup_mci_misc_attrs()
3708 mci->dev_name = pci_name(pvt->F3); in dct_setup_mci_misc_attrs()
3709 mci->ctl_page_to_phys = NULL; in dct_setup_mci_misc_attrs()
3711 /* memory scrubber interface */ in dct_setup_mci_misc_attrs()
3712 mci->set_sdram_scrub_rate = set_scrub_rate; in dct_setup_mci_misc_attrs()
3713 mci->get_sdram_scrub_rate = get_scrub_rate; in dct_setup_mci_misc_attrs()
3720 struct amd64_pvt *pvt = mci->pvt_info; in umc_setup_mci_misc_attrs()
3722 mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4; in umc_setup_mci_misc_attrs()
3723 mci->edac_ctl_cap = EDAC_FLAG_NONE; in umc_setup_mci_misc_attrs()
3727 mci->edac_cap = umc_determine_edac_cap(pvt); in umc_setup_mci_misc_attrs()
3728 mci->mod_name = EDAC_MOD_STR; in umc_setup_mci_misc_attrs()
3729 mci->ctl_name = pvt->ctl_name; in umc_setup_mci_misc_attrs()
3730 mci->dev_name = pci_name(pvt->F3); in umc_setup_mci_misc_attrs()
3731 mci->ctl_page_to_phys = NULL; in umc_setup_mci_misc_attrs()
3738 int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id); in dct_hw_info_get()
3753 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); in umc_hw_info_get()
3754 if (!pvt->umc) in umc_hw_info_get()
3755 return -ENOMEM; in umc_hw_info_get()
3784 u8 ch = (m->ipid & GENMASK(31, 0)) >> 20; in gpu_get_err_info()
3785 u8 phy = ((m->ipid >> 12) & 0xf); in gpu_get_err_info()
3787 err->channel = ch % 2 ? phy + 4 : phy; in gpu_get_err_info()
3788 err->csrow = phy; in gpu_get_err_info()
3794 u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr]; in gpu_addr_mask_to_cs_size()
3819 umc = &pvt->umc[i]; in gpu_dump_misc_regs()
3821 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg); in gpu_dump_misc_regs()
3822 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl); in gpu_dump_misc_regs()
3823 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl); in gpu_dump_misc_regs()
3836 nr_pages <<= 20 - PAGE_SHIFT; in gpu_get_csrow_nr_pages()
3846 struct amd64_pvt *pvt = mci->pvt_info; in gpu_init_csrows()
3855 dimm = mci->csrows[umc]->channels[cs]->dimm; in gpu_init_csrows()
3858 pvt->mc_node_id, cs); in gpu_init_csrows()
3860 dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs); in gpu_init_csrows()
3861 dimm->edac_mode = EDAC_SECDED; in gpu_init_csrows()
3862 dimm->mtype = pvt->dram_type; in gpu_init_csrows()
3863 dimm->dtype = DEV_X16; in gpu_init_csrows()
3864 dimm->grain = 64; in gpu_init_csrows()
3871 struct amd64_pvt *pvt = mci->pvt_info; in gpu_setup_mci_misc_attrs()
3873 mci->mtype_cap = MEM_FLAG_HBM2; in gpu_setup_mci_misc_attrs()
3874 mci->edac_ctl_cap = EDAC_FLAG_SECDED; in gpu_setup_mci_misc_attrs()
3876 mci->edac_cap = EDAC_FLAG_EC; in gpu_setup_mci_misc_attrs()
3877 mci->mod_name = EDAC_MOD_STR; in gpu_setup_mci_misc_attrs()
3878 mci->ctl_name = pvt->ctl_name; in gpu_setup_mci_misc_attrs()
3879 mci->dev_name = pci_name(pvt->F3); in gpu_setup_mci_misc_attrs()
3880 mci->ctl_page_to_phys = NULL; in gpu_setup_mci_misc_attrs()
3913 return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12); in gpu_get_umc_base()
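Worked example of the base computation above, using the MI200 gpu_umc_base of 0x50000 set in per_family_init() below: umc 1, channel 5 gives 0x50000 + (1 << 20) + ((5 % 4) << 12) = 0x151000.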
3918 u8 nid = pvt->mc_node_id; in gpu_read_mc_regs()
3925 umc = &pvt->umc[i]; in gpu_read_mc_regs()
3927 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); in gpu_read_mc_regs()
3928 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); in gpu_read_mc_regs()
3929 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); in gpu_read_mc_regs()
3942 base = &pvt->csels[umc].csbases[cs]; in gpu_read_base_mask()
3944 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) { in gpu_read_base_mask()
3950 mask = &pvt->csels[umc].csmasks[cs]; in gpu_read_base_mask()
3952 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) { in gpu_read_base_mask()
3965 pvt->csels[umc].b_cnt = 8; in gpu_prep_chip_selects()
3966 pvt->csels[umc].m_cnt = 8; in gpu_prep_chip_selects()
3978 pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); in gpu_hw_info_get()
3979 if (!pvt->umc) in gpu_hw_info_get()
3980 return -ENOMEM; in gpu_hw_info_get()
3991 pci_dev_put(pvt->F1); in hw_info_put()
3992 pci_dev_put(pvt->F2); in hw_info_put()
3993 kfree(pvt->umc); in hw_info_put()
4024 pvt->ext_model = boot_cpu_data.x86_model >> 4; in per_family_init()
4025 pvt->stepping = boot_cpu_data.x86_stepping; in per_family_init()
4026 pvt->model = boot_cpu_data.x86_model; in per_family_init()
4027 pvt->fam = boot_cpu_data.x86; in per_family_init()
4028 pvt->max_mcs = 2; in per_family_init()
4034 if (pvt->fam >= 0x17) in per_family_init()
4035 pvt->ops = &umc_ops; in per_family_init()
4037 pvt->ops = &dct_ops; in per_family_init()
4039 switch (pvt->fam) { in per_family_init()
4041 pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ? in per_family_init()
4043 pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP; in per_family_init()
4044 pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL; in per_family_init()
4045 pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow; in per_family_init()
4046 pvt->ops->dbam_to_cs = k8_dbam_to_chip_select; in per_family_init()
4050 pvt->ctl_name = "F10h"; in per_family_init()
4051 pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP; in per_family_init()
4052 pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM; in per_family_init()
4053 pvt->ops->dbam_to_cs = f10_dbam_to_chip_select; in per_family_init()
4057 switch (pvt->model) { in per_family_init()
4059 pvt->ctl_name = "F15h_M30h"; in per_family_init()
4060 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1; in per_family_init()
4061 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2; in per_family_init()
4064 pvt->ctl_name = "F15h_M60h"; in per_family_init()
4065 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1; in per_family_init()
4066 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2; in per_family_init()
4067 pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select; in per_family_init()
4071 return -ENODEV; in per_family_init()
4073 pvt->ctl_name = "F15h"; in per_family_init()
4074 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1; in per_family_init()
4075 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2; in per_family_init()
4076 pvt->ops->dbam_to_cs = f15_dbam_to_chip_select; in per_family_init()
4082 switch (pvt->model) { in per_family_init()
4084 pvt->ctl_name = "F16h_M30h"; in per_family_init()
4085 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1; in per_family_init()
4086 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2; in per_family_init()
4089 pvt->ctl_name = "F16h"; in per_family_init()
4090 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1; in per_family_init()
4091 pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2; in per_family_init()
4097 switch (pvt->model) { in per_family_init()
4099 pvt->ctl_name = "F17h_M10h"; in per_family_init()
4102 pvt->ctl_name = "F17h_M30h"; in per_family_init()
4103 pvt->max_mcs = 8; in per_family_init()
4106 pvt->ctl_name = "F17h_M60h"; in per_family_init()
4109 pvt->ctl_name = "F17h_M70h"; in per_family_init()
4112 pvt->ctl_name = "F17h"; in per_family_init()
4118 pvt->ctl_name = "F18h"; in per_family_init()
4122 switch (pvt->model) { in per_family_init()
4124 pvt->ctl_name = "F19h"; in per_family_init()
4125 pvt->max_mcs = 8; in per_family_init()
4128 pvt->ctl_name = "F19h_M10h"; in per_family_init()
4129 pvt->max_mcs = 12; in per_family_init()
4130 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4133 pvt->ctl_name = "F19h_M20h"; in per_family_init()
4136 if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) { in per_family_init()
4137 pvt->ctl_name = "MI200"; in per_family_init()
4138 pvt->max_mcs = 4; in per_family_init()
4139 pvt->dram_type = MEM_HBM2; in per_family_init()
4140 pvt->gpu_umc_base = 0x50000; in per_family_init()
4141 pvt->ops = &gpu_ops; in per_family_init()
4143 pvt->ctl_name = "F19h_M30h"; in per_family_init()
4144 pvt->max_mcs = 8; in per_family_init()
4148 pvt->ctl_name = "F19h_M50h"; in per_family_init()
4151 pvt->ctl_name = "F19h_M60h"; in per_family_init()
4152 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4155 pvt->ctl_name = "F19h_M70h"; in per_family_init()
4156 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4159 pvt->ctl_name = "F19h_M90h"; in per_family_init()
4160 pvt->max_mcs = 4; in per_family_init()
4161 pvt->dram_type = MEM_HBM3; in per_family_init()
4162 pvt->gpu_umc_base = 0x90000; in per_family_init()
4163 pvt->ops = &gpu_ops; in per_family_init()
4166 pvt->ctl_name = "F19h_MA0h"; in per_family_init()
4167 pvt->max_mcs = 12; in per_family_init()
4168 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4174 switch (pvt->model) { in per_family_init()
4176 pvt->ctl_name = "F1Ah"; in per_family_init()
4177 pvt->max_mcs = 12; in per_family_init()
4178 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4181 pvt->ctl_name = "F1Ah_M40h"; in per_family_init()
4182 pvt->flags.zn_regs_v2 = 1; in per_family_init()
4189 return -ENODEV; in per_family_init()
4209 bool is_gpu = (pvt->ops == &gpu_ops); in get_layer_size()
4212 return is_gpu ? pvt->max_mcs in get_layer_size()
4213 : pvt->csels[0].b_cnt; in get_layer_size()
4215 return is_gpu ? pvt->csels[0].b_cnt in get_layer_size()
4216 : pvt->max_mcs; in get_layer_size()
4223 int ret = -ENOMEM; in init_one_instance()
4232 mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); in init_one_instance()
4236 mci->pvt_info = pvt; in init_one_instance()
4237 mci->pdev = &pvt->F3->dev; in init_one_instance()
4239 pvt->ops->setup_mci_misc_attrs(mci); in init_one_instance()
4241 ret = -ENODEV; in init_one_instance()
4256 for (dct = 0; dct < pvt->max_mcs; dct++) { in instance_has_memory()
4266 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; in probe_one_instance()
4271 ret = -ENOMEM; in probe_one_instance()
4282 pvt->mc_node_id = nid; in probe_one_instance()
4283 pvt->F3 = F3; in probe_one_instance()
4289 ret = pvt->ops->hw_info_get(pvt); in probe_one_instance()
4299 if (!pvt->ops->ecc_enabled(pvt)) { in probe_one_instance()
4300 ret = -ENODEV; in probe_one_instance()
4325 amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id); in probe_one_instance()
4328 pvt->ops->dump_misc_regs(pvt); in probe_one_instance()
4346 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; in remove_one_instance()
4352 mci = edac_mc_del_mc(&F3->dev); in remove_one_instance()
4356 pvt = mci->pvt_info; in remove_one_instance()
4364 mci->pvt_info = NULL; in remove_one_instance()
4399 int err = -ENODEV; in amd64_edac_init()
4403 return -EBUSY; in amd64_edac_init()
4407 return -EBUSY; in amd64_edac_init()
4410 return -ENODEV; in amd64_edac_init()
4413 return -ENODEV; in amd64_edac_init()
4417 err = -ENOMEM; in amd64_edac_init()
4430 while (--i >= 0) in amd64_edac_init()
4438 err = -ENODEV; in amd64_edac_init()
4451 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR); in amd64_edac_init()