/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading existing value */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}
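
/*
 * Note: both accessors above defer to cregs->special_ops when a component
 * installs them, allowing individual models to intercept cache/mem register
 * accesses that need side effects beyond the default array load/store.
 */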

/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
 *     reads are not permitted.
 *   • A 64 bit register shall be accessed as a 8 Bytes quantity. Partial
 *     reads are not permitted.
 *
 * As the spec is defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which we don't care about in QEMU */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}
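
/*
 * Illustrative sketch only: a device model would typically expose the block
 * built above through a 64-bit memory BAR, along the lines of
 *
 *   pci_register_bar(pdev, bar_idx,
 *                    PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                    PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                    &cxl_cstate->crb.component_registers);
 *
 * where pdev, bar_idx and cxl_cstate are assumed caller-side names for the
 * PCIDevice, the chosen BAR index and the CXLComponentState.
 */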

static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but given that no bits are set yet, it can
     * be handled as RO.
     */
    reg_state[R_CXL_RAS_UNC_ERR_STATUS] = 0;
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    reg_state[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
    write_msk[R_CXL_RAS_UNC_ERR_MASK] = 0x1cfff;
    reg_state[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
    write_msk[R_CXL_RAS_UNC_ERR_SEVERITY] = 0x1cfff;
    reg_state[R_CXL_RAS_COR_ERR_STATUS] = 0;
    reg_state[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
    write_msk[R_CXL_RAS_COR_ERR_MASK] = 0x7f;
    /* CXL switches and devices must set this register */
    reg_state[R_CXL_RAS_ERR_CAP_CTRL] = 0x00;
}

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    int decoder_count = 1;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * 0x20] = 0x13ff;
    }
}
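
/*
 * Layout summary for cxl_component_register_init_common() below, restated
 * from the code for quick reference: the per-type "caps" count selects a
 * prefix of the ordered capability list
 *   RAS (ID 2), Link (ID 4), HDM Decoder (ID 5),
 *   Extended Security (ID 6), Snoop (ID 8)
 * so caps = 2 yields RAS + Link, caps = 3 adds HDM, and caps = 5 adds the
 * final two.
 */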

void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}
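
/*
 * Illustrative sketch only: a component model would typically pair the two
 * helpers above at realize time, first creating the register block and then
 * filling in the cache/mem register state and write masks, e.g.
 *
 *   cxl_component_register_block_init(OBJECT(dev), cxl_cstate, type_name);
 *   cxl_component_register_init_common(cxl_cstate->crb.cache_mem_registers,
 *                                      cxl_cstate->crb.cache_mem_regs_write_mask,
 *                                      CXL2_TYPE3_DEVICE);
 *
 * dev, cxl_cstate and type_name are assumed caller-side names, and the final
 * reg_type argument is whichever value matches the component being modelled.
 */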

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are RW1CS bits in the status register, but they are never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}
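
/*
 * Illustrative sketch only: a port or device model could use the helper above
 * to add one of its DVSECs, supplying a fully populated body and letting the
 * helper lay out the header, write masks and running offset, e.g.
 *
 *   CXLDVSECPortExtensions ext = { 0 };
 *
 *   cxl_component_create_dvsec(cxl_cstate, CXL2_ROOT_PORT, sizeof(ext),
 *                              EXTENSIONS_PORT_DVSEC, dvsec_rev,
 *                              (uint8_t *)&ext);
 *
 * cxl_cstate and dvsec_rev are assumed caller-side names; real length and
 * revision values should come from the CXL specification for the DVSEC in
 * question rather than from this sketch.
 */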