/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}
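
/*
 * Minimal handling of HDM decoder writes: there is no real hardware behind
 * the decoder, so when the guest sets the COMMIT bit in DECODER0_CTRL the
 * write takes effect immediately; COMMIT and ERR are cleared and COMMITTED
 * is reported back.
 */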
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        break;
    default:
        break;
    }

    memory_region_transaction_begin();
    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    }
    memory_region_transaction_commit();
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading existing value */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
        return;
    }

    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
        offset <= A_CXL_HDM_DECODER0_TARGET_LIST_HI) {
        dumb_hdm_handler(cxl_cstate, offset, value);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
 *     reads are not permitted.
 *   • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial
 *     reads are not permitted.
 *
 * As of the spec defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* io registers control the link, which we don't care about in QEMU */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}
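
/*
 * Initial values and guest write masks for the RAS capability registers.
 * Error status starts out clear; the uncorrectable mask and severity
 * registers expose only the bits defined in CXL 2.0 (12-13 and 17-31 are
 * reserved) and the correctable error mask exposes bits 0-6.
 */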
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C but given bits are not yet set, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x00);
}

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = 1;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * 0x20] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * 0x20] = 0xffffffff;
    }
}
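
/*
 * Fill in the cache/mem register state and guest write masks for a component
 * of the given type. The number of capability headers advertised in the CXL
 * Capability Header array depends on the component type, encoded as the
 * single 'caps' value below.
 */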
void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}
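
/*
 * Illustrative sketch only (not taken from an existing device model): a
 * type 3 device would typically combine the two init helpers above along
 * the lines of
 *
 *     cxl_component_register_block_init(OBJECT(dev), cxl_cstate,
 *                                       "my-cxl-dev");
 *     cxl_component_register_init_common(cregs->cache_mem_registers,
 *                                        cregs->cache_mem_regs_write_mask,
 *                                        CXL2_TYPE3_DEVICE);
 *
 * where 'dev', 'cxl_cstate' and 'cregs' are the caller's own state and
 * "my-cxl-dev" is only a placeholder memory region name.
 */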
/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /*
     * Configure write masks. The wmask values below mirror the access
     * attributes (RW, RW1CS, RWO, RW Lock) that CXL 2.0 assigns to each
     * DVSEC field, so guest config writes can only change the bits the
     * spec allows to change.
     */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but they are never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}
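/* (The interleave encoding helpers below follow the CXL 2.0 HDM decoder
 * field encodings for interleave ways and granularity; unsupported inputs
 * set errp and return 0.)
 */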