xref: /qemu/hw/mem/cxl_type3.c (revision 21596064081e8d0c0153f68714981c7f0e040973)
1 /*
2  * CXL Type 3 (memory expander) device
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  *
9  * SPDX-License-Identifier: GPL-2.0-only
10  */
11 
12 #include "qemu/osdep.h"
13 #include "qemu/units.h"
14 #include "qemu/error-report.h"
15 #include "qapi/qapi-commands-cxl.h"
16 #include "hw/mem/memory-device.h"
17 #include "hw/mem/pc-dimm.h"
18 #include "hw/pci/pci.h"
19 #include "hw/qdev-properties.h"
20 #include "hw/qdev-properties-system.h"
21 #include "qapi/error.h"
22 #include "qemu/log.h"
23 #include "qemu/module.h"
24 #include "qemu/pmem.h"
25 #include "qemu/range.h"
26 #include "qemu/rcu.h"
27 #include "qemu/guest-random.h"
28 #include "system/hostmem.h"
29 #include "system/numa.h"
30 #include "hw/cxl/cxl.h"
31 #include "hw/pci/msix.h"
32 
33 /* type3 device private */
34 enum CXL_T3_MSIX_VECTOR {
35     CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS = 0,
36     CXL_T3_MSIX_EVENT_START = 2,
37     CXL_T3_MSIX_MBOX = CXL_T3_MSIX_EVENT_START + CXL_EVENT_TYPE_MAX,
38     CXL_T3_MSIX_VECTOR_NR
39 };
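/*
 * Vector layout implied by the enum above: vector 0 serves DOE table access
 * interrupts, vectors 2 .. 2 + CXL_EVENT_TYPE_MAX - 1 serve the event logs,
 * and the mailbox vector follows the event block.
 */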
40 
41 #define DWORD_BYTE 4
42 #define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
43 
44 /* Default CDAT entries for a memory region */
45 enum {
46     CT3_CDAT_DSMAS,
47     CT3_CDAT_DSLBIS0,
48     CT3_CDAT_DSLBIS1,
49     CT3_CDAT_DSLBIS2,
50     CT3_CDAT_DSLBIS3,
51     CT3_CDAT_DSEMTS,
52     CT3_CDAT_NUM_ENTRIES
53 };
54 
55 static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
56                                           int dsmad_handle, uint64_t size,
57                                           bool is_pmem, bool is_dynamic,
58                                           uint64_t dpa_base)
59 {
60     CDATDsmas *dsmas;
61     CDATDslbis *dslbis0;
62     CDATDslbis *dslbis1;
63     CDATDslbis *dslbis2;
64     CDATDslbis *dslbis3;
65     CDATDsemts *dsemts;
66 
67     dsmas = g_malloc(sizeof(*dsmas));
68     *dsmas = (CDATDsmas) {
69         .header = {
70             .type = CDAT_TYPE_DSMAS,
71             .length = sizeof(*dsmas),
72         },
73         .DSMADhandle = dsmad_handle,
74         .flags = (is_pmem ? CDAT_DSMAS_FLAG_NV : 0) |
75                  (is_dynamic ? CDAT_DSMAS_FLAG_DYNAMIC_CAP : 0),
76         .DPA_base = dpa_base,
77         .DPA_length = size,
78     };
79 
80     /* For now, no memory side cache; plausible-ish numbers */
81     dslbis0 = g_malloc(sizeof(*dslbis0));
82     *dslbis0 = (CDATDslbis) {
83         .header = {
84             .type = CDAT_TYPE_DSLBIS,
85             .length = sizeof(*dslbis0),
86         },
87         .handle = dsmad_handle,
88         .flags = HMAT_LB_MEM_MEMORY,
89         .data_type = HMAT_LB_DATA_READ_LATENCY,
90         .entry_base_unit = 10000, /* 10ns base */
91         .entry[0] = 15, /* 150ns */
92     };
93 
94     dslbis1 = g_malloc(sizeof(*dslbis1));
95     *dslbis1 = (CDATDslbis) {
96         .header = {
97             .type = CDAT_TYPE_DSLBIS,
98             .length = sizeof(*dslbis1),
99         },
100         .handle = dsmad_handle,
101         .flags = HMAT_LB_MEM_MEMORY,
102         .data_type = HMAT_LB_DATA_WRITE_LATENCY,
103         .entry_base_unit = 10000,
104         .entry[0] = 25, /* 250ns */
105     };
106 
107     dslbis2 = g_malloc(sizeof(*dslbis2));
108     *dslbis2 = (CDATDslbis) {
109         .header = {
110             .type = CDAT_TYPE_DSLBIS,
111             .length = sizeof(*dslbis2),
112         },
113         .handle = dsmad_handle,
114         .flags = HMAT_LB_MEM_MEMORY,
115         .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
116         .entry_base_unit = 1000, /* GB/s */
117         .entry[0] = 16,
118     };
119 
120     dslbis3 = g_malloc(sizeof(*dslbis3));
121     *dslbis3 = (CDATDslbis) {
122         .header = {
123             .type = CDAT_TYPE_DSLBIS,
124             .length = sizeof(*dslbis3),
125         },
126         .handle = dsmad_handle,
127         .flags = HMAT_LB_MEM_MEMORY,
128         .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
129         .entry_base_unit = 1000, /* GB/s */
130         .entry[0] = 16,
131     };
132 
133     dsemts = g_malloc(sizeof(*dsemts));
134     *dsemts = (CDATDsemts) {
135         .header = {
136             .type = CDAT_TYPE_DSEMTS,
137             .length = sizeof(*dsemts),
138         },
139         .DSMAS_handle = dsmad_handle,
140         /*
141          * NV: Reserved - the non-volatile flag from DSMAS is what matters
142          * V: EFI_MEMORY_SP
143          */
144         .EFI_memory_type_attr = is_pmem ? 2 : 1,
145         .DPA_offset = 0,
146         .DPA_length = size,
147     };
148 
149     /* Header always at start of structure */
150     cdat_table[CT3_CDAT_DSMAS] = (CDATSubHeader *)dsmas;
151     cdat_table[CT3_CDAT_DSLBIS0] = (CDATSubHeader *)dslbis0;
152     cdat_table[CT3_CDAT_DSLBIS1] = (CDATSubHeader *)dslbis1;
153     cdat_table[CT3_CDAT_DSLBIS2] = (CDATSubHeader *)dslbis2;
154     cdat_table[CT3_CDAT_DSLBIS3] = (CDATSubHeader *)dslbis3;
155     cdat_table[CT3_CDAT_DSEMTS] = (CDATSubHeader *)dsemts;
156 }
157 
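/*
 * Build the device's CDAT: each backed capacity (volatile, persistent, and
 * every dynamic-capacity region) contributes one block of
 * CT3_CDAT_NUM_ENTRIES entries - a DSMAS, four DSLBIS entries covering
 * read/write latency and bandwidth, and a DSEMTS - with DPA bases laid out
 * in that order.
 */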
158 static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
159 {
160     g_autofree CDATSubHeader **table = NULL;
161     CXLType3Dev *ct3d = priv;
162     MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
163     MemoryRegion *dc_mr = NULL;
164     uint64_t vmr_size = 0, pmr_size = 0;
165     int dsmad_handle = 0;
166     int cur_ent = 0;
167     int len = 0;
168 
169     if (!ct3d->hostpmem && !ct3d->hostvmem && !ct3d->dc.num_regions) {
170         return 0;
171     }
172 
173     if (ct3d->hostvmem) {
174         volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
175         if (!volatile_mr) {
176             return -EINVAL;
177         }
178         len += CT3_CDAT_NUM_ENTRIES;
179         vmr_size = memory_region_size(volatile_mr);
180     }
181 
182     if (ct3d->hostpmem) {
183         nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
184         if (!nonvolatile_mr) {
185             return -EINVAL;
186         }
187         len += CT3_CDAT_NUM_ENTRIES;
188         pmr_size = memory_region_size(nonvolatile_mr);
189     }
190 
191     if (ct3d->dc.num_regions) {
192         if (!ct3d->dc.host_dc) {
193             return -EINVAL;
194         }
195         dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
196         if (!dc_mr) {
197             return -EINVAL;
198         }
199         len += CT3_CDAT_NUM_ENTRIES * ct3d->dc.num_regions;
200     }
201 
202     table = g_malloc0(len * sizeof(*table));
203 
204     /* Now fill them in */
205     if (volatile_mr) {
206         ct3_build_cdat_entries_for_mr(table, dsmad_handle++, vmr_size,
207                                       false, false, 0);
208         cur_ent = CT3_CDAT_NUM_ENTRIES;
209     }
210 
211     if (nonvolatile_mr) {
212         uint64_t base = vmr_size;
213         ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
214                                       pmr_size, true, false, base);
215         cur_ent += CT3_CDAT_NUM_ENTRIES;
216     }
217 
218     if (dc_mr) {
219         int i;
220         uint64_t region_base = vmr_size + pmr_size;
221 
222         /*
223          * We assume the dynamic capacity to be volatile for now.
224          * Non-volatile dynamic capacity will be added if needed in the
225          * future.
226          */
227         for (i = 0; i < ct3d->dc.num_regions; i++) {
228             ct3_build_cdat_entries_for_mr(&(table[cur_ent]),
229                                           dsmad_handle++,
230                                           ct3d->dc.regions[i].len,
231                                           false, true, region_base);
232             ct3d->dc.regions[i].dsmadhandle = dsmad_handle - 1;
233 
234             cur_ent += CT3_CDAT_NUM_ENTRIES;
235             region_base += ct3d->dc.regions[i].len;
236         }
237     }
238 
239     assert(len == cur_ent);
240 
241     *cdat_table = g_steal_pointer(&table);
242 
243     return len;
244 }
245 
246 static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
247 {
248     int i;
249 
250     for (i = 0; i < num; i++) {
251         g_free(cdat_table[i]);
252     }
253     g_free(cdat_table);
254 }
255 
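/*
 * DOE CDAT table access responder: copy out the CDAT structure selected by
 * the request's entry handle and report the next handle in the response, or
 * CXL_DOE_TAB_ENT_MAX once the final entry has been returned.
 */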
256 static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
257 {
258     CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
259     uint16_t ent;
260     void *base;
261     uint32_t len;
262     CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
263     CDATRsp rsp;
264 
265     assert(cdat->entry_len);
266 
267     /* Discard if request length mismatched */
268     if (pcie_doe_get_obj_len(req) <
269         DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
270         return false;
271     }
272 
273     ent = req->entry_handle;
274     base = cdat->entry[ent].base;
275     len = cdat->entry[ent].length;
276 
277     rsp = (CDATRsp) {
278         .header = {
279             .vendor_id = CXL_VENDOR_ID,
280             .data_obj_type = CXL_DOE_TABLE_ACCESS,
281             .reserved = 0x0,
282             .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
283         },
284         .rsp_code = CXL_DOE_TAB_RSP,
285         .table_type = CXL_DOE_TAB_TYPE_CDAT,
286         .entry_handle = (ent < cdat->entry_len - 1) ?
287                         ent + 1 : CXL_DOE_TAB_ENT_MAX,
288     };
289 
290     memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
291     memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
292            base, len);
293 
294     doe_cap->read_mbox_len += rsp.header.length;
295 
296     return true;
297 }
298 
299 static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
300 {
301     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
302     uint32_t val;
303 
304     if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
305         return val;
306     }
307 
308     return pci_default_read_config(pci_dev, addr, size);
309 }
310 
311 static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
312                               int size)
313 {
314     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
315 
316     pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
317     pci_default_write_config(pci_dev, addr, val, size);
318     pcie_aer_write_config(pci_dev, addr, val, size);
319 }
320 
321 /*
322  * Null value of all Fs suggested by IEEE RA guidelines for use of
323  * EU, OUI and CID
324  */
325 #define UI64_NULL ~(0ULL)
326 
327 static void build_dvsecs(CXLType3Dev *ct3d)
328 {
329     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
330     uint8_t *dvsec;
331     uint32_t range1_size_hi, range1_size_lo,
332              range1_base_hi = 0, range1_base_lo = 0,
333              range2_size_hi = 0, range2_size_lo = 0,
334              range2_base_hi = 0, range2_base_lo = 0;
335 
336     /*
337      * Volatile memory is mapped as (0x0)
338      * Persistent memory is mapped at (volatile->size)
339      */
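    /*
     * Note: only bits [31:28] of each backend size land in range?_size_lo
     * (hence the 0xF0000000 mask); the low bits carry the DVSEC range
     * attribute fields (memory info valid, memory active, media type and
     * memory class) encoded by the constants below.
     */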
340     if (ct3d->hostvmem) {
341         range1_size_hi = ct3d->hostvmem->size >> 32;
342         range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
343                          (ct3d->hostvmem->size & 0xF0000000);
344         if (ct3d->hostpmem) {
345             range2_size_hi = ct3d->hostpmem->size >> 32;
346             range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
347                              (ct3d->hostpmem->size & 0xF0000000);
348         }
349     } else if (ct3d->hostpmem) {
350         range1_size_hi = ct3d->hostpmem->size >> 32;
351         range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
352                          (ct3d->hostpmem->size & 0xF0000000);
353     } else {
354         /*
355          * For DCD with no static memory, set memory active, memory class bits.
356          * No range is set.
357          */
358         range1_size_hi = 0;
359         range1_size_lo = (2 << 5) | (2 << 2) | 0x3;
360     }
361 
362     dvsec = (uint8_t *)&(CXLDVSECDevice){
363         .cap = 0x1e,
364         .ctrl = 0x2,
365         .status2 = 0x2,
366         .range1_size_hi = range1_size_hi,
367         .range1_size_lo = range1_size_lo,
368         .range1_base_hi = range1_base_hi,
369         .range1_base_lo = range1_base_lo,
370         .range2_size_hi = range2_size_hi,
371         .range2_size_lo = range2_size_lo,
372         .range2_base_hi = range2_base_hi,
373         .range2_base_lo = range2_base_lo,
374     };
375     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
376                                PCIE_CXL_DEVICE_DVSEC_LENGTH,
377                                PCIE_CXL_DEVICE_DVSEC,
378                                PCIE_CXL31_DEVICE_DVSEC_REVID, dvsec);
379 
380     dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
381         .rsvd         = 0,
382         .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
383         .reg0_base_hi = 0,
384         .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
385         .reg1_base_hi = 0,
386     };
387     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
388                                REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
389                                REG_LOC_DVSEC_REVID, dvsec);
390     dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
391         .phase2_duration = 0x603, /* 3 seconds */
392         .phase2_power = 0x33, /* 0x33 milliwatts */
393     };
394     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
395                                GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
396                                GPF_DEVICE_DVSEC_REVID, dvsec);
397 
398     dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
399         .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
400         .ctrl                    = 0x02, /* IO always enabled */
401         .status                  = 0x26, /* same as capabilities */
402         .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
403     };
404     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
405                                PCIE_CXL3_FLEXBUS_PORT_DVSEC_LENGTH,
406                                PCIE_FLEXBUS_PORT_DVSEC,
407                                PCIE_CXL3_FLEXBUS_PORT_DVSEC_REVID, dvsec);
408 }
409 
410 static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
411 {
412     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
413     ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
414     uint32_t *cache_mem = cregs->cache_mem_registers;
415     uint32_t ctrl;
416 
417     ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
418     /* TODO: Sanity checks that the decoder is possible */
419     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
420     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
421 
422     stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
423 }
424 
425 static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
426 {
427     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
428     ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
429     uint32_t *cache_mem = cregs->cache_mem_registers;
430     uint32_t ctrl;
431 
432     ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
433 
434     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
435     ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
436 
437     stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
438 }
439 
440 static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
441 {
442     switch (qmp_err) {
443     case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
444         return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
445     case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
446         return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
447     case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
448         return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
449     case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
450         return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
451     case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
452         return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
453     case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
454         return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
455     case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
456         return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
457     case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
458         return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
459     case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
460         return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
461     case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
462         return CXL_RAS_UNC_ERR_RSVD_ENCODING;
463     case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
464         return CXL_RAS_UNC_ERR_POISON_RECEIVED;
465     case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
466         return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
467     case CXL_UNCOR_ERROR_TYPE_INTERNAL:
468         return CXL_RAS_UNC_ERR_INTERNAL;
469     case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
470         return CXL_RAS_UNC_ERR_CXL_IDE_TX;
471     case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
472         return CXL_RAS_UNC_ERR_CXL_IDE_RX;
473     default:
474         return -EINVAL;
475     }
476 }
477 
478 static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
479 {
480     switch (qmp_err) {
481     case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
482         return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
483     case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
484         return CXL_RAS_COR_ERR_MEM_DATA_ECC;
485     case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
486         return CXL_RAS_COR_ERR_CRC_THRESHOLD;
487     case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
488         return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
489     case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
490         return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
491     case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
492         return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
493     case CXL_COR_ERROR_TYPE_PHYSICAL:
494         return CXL_RAS_COR_ERR_PHYSICAL;
495     default:
496         return -EINVAL;
497     }
498 }
499 
500 static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
501                            unsigned size)
502 {
503     CXLComponentState *cxl_cstate = opaque;
504     ComponentRegisters *cregs = &cxl_cstate->crb;
505     CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
506     uint32_t *cache_mem = cregs->cache_mem_registers;
507     bool should_commit = false;
508     bool should_uncommit = false;
509     int which_hdm = -1;
510 
511     assert(size == 4);
512     g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);
513 
514     switch (offset) {
515     case A_CXL_HDM_DECODER0_CTRL:
516         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
517         should_uncommit = !should_commit;
518         which_hdm = 0;
519         break;
520     case A_CXL_HDM_DECODER1_CTRL:
521         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
522         should_uncommit = !should_commit;
523         which_hdm = 1;
524         break;
525     case A_CXL_HDM_DECODER2_CTRL:
526         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
527         should_uncommit = !should_commit;
528         which_hdm = 2;
529         break;
530     case A_CXL_HDM_DECODER3_CTRL:
531         should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
532         should_uncommit = !should_commit;
533         which_hdm = 3;
534         break;
535     case A_CXL_RAS_UNC_ERR_STATUS:
536     {
537         uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
538         uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
539                                  FIRST_ERROR_POINTER);
540         CXLError *cxl_err;
541         uint32_t unc_err;
542 
543         /*
544          * If single bit written that corresponds to the first error
545          * pointer being cleared, update the status and header log.
546          */
547         if (!QTAILQ_EMPTY(&ct3d->error_list)) {
548             if ((1 << fe) ^ value) {
549                 CXLError *cxl_next;
550                 /*
551                  * Software is using wrong flow for multiple header recording
552                  * Following behavior in PCIe r6.0 and assuming multiple
553                  * header support. Implementation defined choice to clear all
554                  * matching records if more than one bit set - which corresponds
555                  * closest to behavior of hardware not capable of multiple
556                  * header recording.
557                  */
558                 QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
559                                     cxl_next) {
560                     if ((1 << cxl_err->type) & value) {
561                         QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
562                         g_free(cxl_err);
563                     }
564                 }
565             } else {
566                 /* Done with previous FE, so drop from list */
567                 cxl_err = QTAILQ_FIRST(&ct3d->error_list);
568                 QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
569                 g_free(cxl_err);
570             }
571 
572             /*
573              * If there is another FE, then put that in place and update
574              * the header log
575              */
576             if (!QTAILQ_EMPTY(&ct3d->error_list)) {
577                 uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
578                 int i;
579 
580                 cxl_err = QTAILQ_FIRST(&ct3d->error_list);
581                 for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
582                     stl_le_p(header_log + i, cxl_err->header[i]);
583                 }
584                 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
585                                      FIRST_ERROR_POINTER, cxl_err->type);
586             } else {
587                 /*
588                  * If no more errors, then follow recommendation of PCI spec
589                  * r6.0 6.2.4.2 to set the first error pointer to a status
590                  * bit that will never be used.
591                  */
592                 capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
593                                      FIRST_ERROR_POINTER,
594                                      CXL_RAS_UNC_ERR_CXL_UNUSED);
595             }
596             stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
597         }
598         unc_err = 0;
599         QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
600             unc_err |= 1 << cxl_err->type;
601         }
602         stl_le_p((uint8_t *)cache_mem + offset, unc_err);
603 
604         return;
605     }
606     case A_CXL_RAS_COR_ERR_STATUS:
607     {
608         uint32_t rw1c = value;
609         uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
610         temp &= ~rw1c;
611         stl_le_p((uint8_t *)cache_mem + offset, temp);
612         return;
613     }
614     default:
615         break;
616     }
617 
618     stl_le_p((uint8_t *)cache_mem + offset, value);
619     if (should_commit) {
620         hdm_decoder_commit(ct3d, which_hdm);
621     } else if (should_uncommit) {
622         hdm_decoder_uncommit(ct3d, which_hdm);
623     }
624 }
625 
626 /*
627  * TODO: dc region configuration will be updated once host backend and address
628  * space support is added for DCD.
629  */
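/*
 * Carve the single DC backend into dc.num_regions equal regions placed in
 * DPA space directly after any static volatile/persistent capacity. Both
 * the per-region length and the starting base must be multiples of
 * CXL_CAPACITY_MULTIPLIER; each region tracks which 2 MiB blocks are backed
 * by accepted extents in blk_bitmap.
 */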
630 static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
631 {
632     int i;
633     uint64_t region_base = 0;
634     uint64_t region_len;
635     uint64_t decode_len;
636     uint64_t blk_size = 2 * MiB;
637     CXLDCRegion *region;
638     MemoryRegion *mr;
639     uint64_t dc_size;
640 
641     mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
642     dc_size = memory_region_size(mr);
643     region_len = DIV_ROUND_UP(dc_size, ct3d->dc.num_regions);
644 
645     if (dc_size % (ct3d->dc.num_regions * CXL_CAPACITY_MULTIPLIER) != 0) {
646         error_setg(errp,
647                    "backend size is not a multiple of region len: 0x%" PRIx64,
648                    region_len);
649         return false;
650     }
651     if (region_len % CXL_CAPACITY_MULTIPLIER != 0) {
652         error_setg(errp, "DC region size is unaligned to 0x%" PRIx64,
653                    CXL_CAPACITY_MULTIPLIER);
654         return false;
655     }
656     decode_len = region_len;
657 
658     if (ct3d->hostvmem) {
659         mr = host_memory_backend_get_memory(ct3d->hostvmem);
660         region_base += memory_region_size(mr);
661     }
662     if (ct3d->hostpmem) {
663         mr = host_memory_backend_get_memory(ct3d->hostpmem);
664         region_base += memory_region_size(mr);
665     }
666     if (region_base % CXL_CAPACITY_MULTIPLIER != 0) {
667         error_setg(errp, "DC region base not aligned to 0x%" PRIx64,
668                    CXL_CAPACITY_MULTIPLIER);
669         return false;
670     }
671 
672     for (i = 0, region = &ct3d->dc.regions[0];
673          i < ct3d->dc.num_regions;
674          i++, region++, region_base += region_len) {
675         *region = (CXLDCRegion) {
676             .base = region_base,
677             .decode_len = decode_len,
678             .len = region_len,
679             .block_size = blk_size,
680             /* dsmad_handle set when creating CDAT table entries */
681             .flags = 0,
682         };
683         ct3d->dc.total_capacity += region->len;
684         region->blk_bitmap = bitmap_new(region->len / region->block_size);
685     }
686     QTAILQ_INIT(&ct3d->dc.extents);
687     QTAILQ_INIT(&ct3d->dc.extents_pending);
688 
689     return true;
690 }
691 
692 static void cxl_destroy_dc_regions(CXLType3Dev *ct3d)
693 {
694     CXLDCExtent *ent, *ent_next;
695     CXLDCExtentGroup *group, *group_next;
696     int i;
697     CXLDCRegion *region;
698 
699     QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
700         cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
701     }
702 
703     QTAILQ_FOREACH_SAFE(group, &ct3d->dc.extents_pending, node, group_next) {
704         QTAILQ_REMOVE(&ct3d->dc.extents_pending, group, node);
705         QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
706             cxl_remove_extent_from_extent_list(&group->list, ent);
707         }
708         g_free(group);
709     }
710 
711     for (i = 0; i < ct3d->dc.num_regions; i++) {
712         region = &ct3d->dc.regions[i];
713         g_free(region->blk_bitmap);
714     }
715 }
716 
717 static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
718 {
719     DeviceState *ds = DEVICE(ct3d);
720 
721     if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem
722         && !ct3d->dc.num_regions) {
723         error_setg(errp, "at least one memdev property must be set");
724         return false;
725     } else if (ct3d->hostmem && ct3d->hostpmem) {
726         error_setg(errp, "[memdev] cannot be used with new "
727                          "[persistent-memdev] property");
728         return false;
729     } else if (ct3d->hostmem) {
730         /* Use of hostmem property implies pmem */
731         ct3d->hostpmem = ct3d->hostmem;
732         ct3d->hostmem = NULL;
733     }
734 
735     if (ct3d->hostpmem && !ct3d->lsa) {
736         error_setg(errp, "lsa property must be set for persistent devices");
737         return false;
738     }
739 
740     if (ct3d->hostvmem) {
741         MemoryRegion *vmr;
742         char *v_name;
743 
744         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
745         if (!vmr) {
746             error_setg(errp, "volatile memdev must have backing device");
747             return false;
748         }
749         if (host_memory_backend_is_mapped(ct3d->hostvmem)) {
750             error_setg(errp, "memory backend %s can't be used multiple times.",
751                object_get_canonical_path_component(OBJECT(ct3d->hostvmem)));
752             return false;
753         }
754         memory_region_set_nonvolatile(vmr, false);
755         memory_region_set_enabled(vmr, true);
756         host_memory_backend_set_mapped(ct3d->hostvmem, true);
757         if (ds->id) {
758             v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
759         } else {
760             v_name = g_strdup("cxl-type3-dpa-vmem-space");
761         }
762         address_space_init(&ct3d->hostvmem_as, vmr, v_name);
763         ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
764         ct3d->cxl_dstate.static_mem_size += memory_region_size(vmr);
765         g_free(v_name);
766     }
767 
768     if (ct3d->hostpmem) {
769         MemoryRegion *pmr;
770         char *p_name;
771 
772         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
773         if (!pmr) {
774             error_setg(errp, "persistent memdev must have backing device");
775             return false;
776         }
777         if (host_memory_backend_is_mapped(ct3d->hostpmem)) {
778             error_setg(errp, "memory backend %s can't be used multiple times.",
779                object_get_canonical_path_component(OBJECT(ct3d->hostpmem)));
780             return false;
781         }
782         memory_region_set_nonvolatile(pmr, true);
783         memory_region_set_enabled(pmr, true);
784         host_memory_backend_set_mapped(ct3d->hostpmem, true);
785         if (ds->id) {
786             p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
787         } else {
788             p_name = g_strdup("cxl-type3-dpa-pmem-space");
789         }
790         address_space_init(&ct3d->hostpmem_as, pmr, p_name);
791         ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
792         ct3d->cxl_dstate.static_mem_size += memory_region_size(pmr);
793         g_free(p_name);
794     }
795 
796     ct3d->dc.total_capacity = 0;
797     if (ct3d->dc.num_regions > 0) {
798         MemoryRegion *dc_mr;
799         char *dc_name;
800 
801         if (!ct3d->dc.host_dc) {
802             error_setg(errp, "dynamic capacity must have a backing device");
803             return false;
804         }
805 
806         dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
807         if (!dc_mr) {
808             error_setg(errp, "dynamic capacity must have a backing device");
809             return false;
810         }
811 
812         if (host_memory_backend_is_mapped(ct3d->dc.host_dc)) {
813             error_setg(errp, "memory backend %s can't be used multiple times.",
814                object_get_canonical_path_component(OBJECT(ct3d->dc.host_dc)));
815             return false;
816         }
817         /*
818          * Set DC regions as volatile for now, non-volatile support can
819          * be added in the future if needed.
820          */
821         memory_region_set_nonvolatile(dc_mr, false);
822         memory_region_set_enabled(dc_mr, true);
823         host_memory_backend_set_mapped(ct3d->dc.host_dc, true);
824         if (ds->id) {
825             dc_name = g_strdup_printf("cxl-dcd-dpa-dc-space:%s", ds->id);
826         } else {
827             dc_name = g_strdup("cxl-dcd-dpa-dc-space");
828         }
829         address_space_init(&ct3d->dc.host_dc_as, dc_mr, dc_name);
830         g_free(dc_name);
831 
832         if (!cxl_create_dc_regions(ct3d, errp)) {
833             error_append_hint(errp, "setting up DC regions failed\n");
834             return false;
835         }
836     }
837 
838     return true;
839 }
840 
841 static DOEProtocol doe_cdat_prot[] = {
842     { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
843     { }
844 };
845 
846 /* Initialize CXL device alerts with default threshold values. */
847 static void init_alert_config(CXLType3Dev *ct3d)
848 {
849     ct3d->alert_config = (CXLAlertConfig) {
850         .life_used_crit_alert_thresh = 75,
851         .life_used_warn_thresh = 40,
852         .over_temp_crit_alert_thresh = 35,
853         .under_temp_crit_alert_thresh = 10,
854         .over_temp_warn_thresh = 25,
855         .under_temp_warn_thresh = 20
856     };
857 }
858 
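/*
 * Realize: wire up the memory backends and DPA address spaces, the CXL
 * component and device register BARs, MSI-X, the CDAT DOE mailbox, AER,
 * the event logs and the default alert/scrub/ECS attributes.
 */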
859 static void ct3_realize(PCIDevice *pci_dev, Error **errp)
860 {
861     ERRP_GUARD();
862     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
863     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
864     ComponentRegisters *regs = &cxl_cstate->crb;
865     MemoryRegion *mr = &regs->component_registers;
866     uint8_t *pci_conf = pci_dev->config;
867     int i, rc;
868     uint16_t count;
869 
870     QTAILQ_INIT(&ct3d->error_list);
871 
872     if (!cxl_setup_memory(ct3d, errp)) {
873         return;
874     }
875 
876     pci_config_set_prog_interface(pci_conf, 0x10);
877 
878     pcie_endpoint_cap_init(pci_dev, 0x80);
879     if (ct3d->sn != UI64_NULL) {
880         pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
881         cxl_cstate->dvsec_offset = 0x100 + 0x0c;
882     } else {
883         cxl_cstate->dvsec_offset = 0x100;
884     }
885 
886     ct3d->cxl_cstate.pdev = pci_dev;
887     build_dvsecs(ct3d);
888 
889     regs->special_ops = g_new0(MemoryRegionOps, 1);
890     regs->special_ops->write = ct3d_reg_write;
891 
892     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
893                                       TYPE_CXL_TYPE3);
894 
895     pci_register_bar(
896         pci_dev, CXL_COMPONENT_REG_BAR_IDX,
897         PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);
898 
899     cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
900                                    &ct3d->cci);
901     pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
902                      PCI_BASE_ADDRESS_SPACE_MEMORY |
903                          PCI_BASE_ADDRESS_MEM_TYPE_64,
904                      &ct3d->cxl_dstate.device_registers);
905 
906     /* MSI(-X) Initialization */
907     rc = msix_init_exclusive_bar(pci_dev, CXL_T3_MSIX_VECTOR_NR, 4, errp);
908     if (rc) {
909         goto err_free_special_ops;
910     }
911     for (i = 0; i < CXL_T3_MSIX_VECTOR_NR; i++) {
912         msix_vector_use(pci_dev, i);
913     }
914 
915     /* DOE Initialization */
916     pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true,
917                   CXL_T3_MSIX_PCIE_DOE_TABLE_ACCESS);
918 
919     cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
920     cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
921     cxl_cstate->cdat.private = ct3d;
922     if (!cxl_doe_cdat_init(cxl_cstate, errp)) {
923         goto err_msix_uninit;
924     }
925 
926     init_alert_config(ct3d);
927     pcie_cap_deverr_init(pci_dev);
928     /* Leave a bit of room for expansion */
929     rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, errp);
930     if (rc) {
931         goto err_release_cdat;
932     }
933     cxl_event_init(&ct3d->cxl_dstate, CXL_T3_MSIX_EVENT_START);
934 
935     /* Set default value for patrol scrub attributes */
936     ct3d->patrol_scrub_attrs.scrub_cycle_cap =
937                            CXL_MEMDEV_PS_SCRUB_CYCLE_CHANGE_CAP_DEFAULT |
938                            CXL_MEMDEV_PS_SCRUB_REALTIME_REPORT_CAP_DEFAULT;
939     ct3d->patrol_scrub_attrs.scrub_cycle =
940                            CXL_MEMDEV_PS_CUR_SCRUB_CYCLE_DEFAULT |
941                            (CXL_MEMDEV_PS_MIN_SCRUB_CYCLE_DEFAULT << 8);
942     ct3d->patrol_scrub_attrs.scrub_flags = CXL_MEMDEV_PS_ENABLE_DEFAULT;
943 
944     /* Set default value for DDR5 ECS read attributes */
945     ct3d->ecs_attrs.ecs_log_cap = CXL_ECS_LOG_ENTRY_TYPE_DEFAULT;
946     for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
947         ct3d->ecs_attrs.fru_attrs[count].ecs_cap =
948                             CXL_ECS_REALTIME_REPORT_CAP_DEFAULT;
949         ct3d->ecs_attrs.fru_attrs[count].ecs_config =
950                             CXL_ECS_THRESHOLD_COUNT_DEFAULT |
951                             (CXL_ECS_MODE_DEFAULT << 3);
952         /* Reserved */
953         ct3d->ecs_attrs.fru_attrs[count].ecs_flags = 0;
954     }
955 
956     return;
957 
958 err_release_cdat:
959     cxl_doe_cdat_release(cxl_cstate);
960 err_msix_uninit:
961     msix_uninit_exclusive_bar(pci_dev);
962 err_free_special_ops:
963     g_free(regs->special_ops);
964     if (ct3d->dc.host_dc) {
965         cxl_destroy_dc_regions(ct3d);
966         address_space_destroy(&ct3d->dc.host_dc_as);
967     }
968     if (ct3d->hostpmem) {
969         address_space_destroy(&ct3d->hostpmem_as);
970     }
971     if (ct3d->hostvmem) {
972         address_space_destroy(&ct3d->hostvmem_as);
973     }
974 }
975 
976 static void ct3_exit(PCIDevice *pci_dev)
977 {
978     CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
979     CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
980     ComponentRegisters *regs = &cxl_cstate->crb;
981 
982     pcie_aer_exit(pci_dev);
983     cxl_doe_cdat_release(cxl_cstate);
984     msix_uninit_exclusive_bar(pci_dev);
985     g_free(regs->special_ops);
986     cxl_destroy_cci(&ct3d->cci);
987     if (ct3d->dc.host_dc) {
988         cxl_destroy_dc_regions(ct3d);
989         address_space_destroy(&ct3d->dc.host_dc_as);
990     }
991     if (ct3d->hostpmem) {
992         address_space_destroy(&ct3d->hostpmem_as);
993     }
994     if (ct3d->hostvmem) {
995         address_space_destroy(&ct3d->hostvmem_as);
996     }
997 }
998 
999 /*
1000  * Mark the DPA range [dpa, dpa + len - 1] to be backed and accessible. This
1001  * happens when a DC extent is added and accepted by the host.
1002  */
1003 void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
1004                                  uint64_t len)
1005 {
1006     CXLDCRegion *region;
1007 
1008     region = cxl_find_dc_region(ct3d, dpa, len);
1009     if (!region) {
1010         return;
1011     }
1012 
1013     bitmap_set(region->blk_bitmap, (dpa - region->base) / region->block_size,
1014                len / region->block_size);
1015 }
1016 
1017 /*
1018  * Check whether the DPA range [dpa, dpa + len - 1] is backed with DC extents.
1019  * Used when validating read/write to dc regions
1020  */
1021 bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
1022                                   uint64_t len)
1023 {
1024     CXLDCRegion *region;
1025     uint64_t nbits;
1026     long nr;
1027 
1028     region = cxl_find_dc_region(ct3d, dpa, len);
1029     if (!region) {
1030         return false;
1031     }
1032 
1033     nr = (dpa - region->base) / region->block_size;
1034     nbits = DIV_ROUND_UP(len, region->block_size);
1035     /*
1036      * if bits between [dpa, dpa + len) are all 1s, meaning the DPA range is
1037      * backed with DC extents, return true; else return false.
1038      */
1039     return find_next_zero_bit(region->blk_bitmap, nr + nbits, nr) == nr + nbits;
1040 }
1041 
1042 /*
1043  * Mark the DPA range [dpa, dpa + len - 1] to be unbacked and inaccessible.
1044  * This happens when a dc extent is released by the host.
1045  */
1046 void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
1047                                    uint64_t len)
1048 {
1049     CXLDCRegion *region;
1050     uint64_t nbits;
1051     long nr;
1052 
1053     region = cxl_find_dc_region(ct3d, dpa, len);
1054     if (!region) {
1055         return;
1056     }
1057 
1058     nr = (dpa - region->base) / region->block_size;
1059     nbits = len / region->block_size;
1060     bitmap_clear(region->blk_bitmap, nr, nbits);
1061 }
1062 
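/*
 * Translate a host physical address to a device physical address by walking
 * the committed HDM decoders. For a matching decoder, address bits below
 * (8 + ig) of the HPA offset pass straight through, while the interleave
 * selection bits above them are stripped (shifted out for power-of-two
 * interleave ways, divided by 3 for 3/6/12-way sets).
 */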
1063 static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
1064 {
1065     int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
1066     uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
1067     unsigned int hdm_count;
1068     uint32_t cap;
1069     uint64_t dpa_base = 0;
1070     int i;
1071 
1072     cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
1073     hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
1074                                                  CXL_HDM_DECODER_CAPABILITY,
1075                                                  DECODER_COUNT));
1076 
1077     for (i = 0; i < hdm_count; i++) {
1078         uint64_t decoder_base, decoder_size, hpa_offset, skip;
1079         uint32_t hdm_ctrl, low, high;
1080         int ig, iw;
1081 
1082         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
1083         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
1084         decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);
1085 
1086         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
1087         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
1088         decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);
1089 
1090         low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
1091                        i * hdm_inc);
1092         high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
1093                         i * hdm_inc);
1094         skip = ((uint64_t)high << 32) | (low & 0xf0000000);
1095         dpa_base += skip;
1096 
1097         hpa_offset = (uint64_t)host_addr - decoder_base;
1098 
1099         hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
1100         iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
1101         ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
1102         if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
1103             return false;
1104         }
1105         if (((uint64_t)host_addr < decoder_base) ||
1106             (hpa_offset >= decoder_size)) {
1107             int decoded_iw = cxl_interleave_ways_dec(iw, &error_fatal);
1108 
1109             if (decoded_iw == 0) {
1110                 return false;
1111             }
1112 
1113             dpa_base += decoder_size / decoded_iw;
1114             continue;
1115         }
1116 
1117         if (iw < 8) {
1118             *dpa = dpa_base +
1119                 ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
1120                  ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
1121                   >> iw));
1122         } else {
1123             *dpa = dpa_base +
1124                 ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
1125                  ((((MAKE_64BIT_MASK(ig + iw, 64 - ig - iw) & hpa_offset)
1126                    >> (ig + iw)) / 3) << (ig + 8)));
1127         }
1128 
1129         return true;
1130     }
1131     return false;
1132 }
1133 
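/*
 * Resolve an HPA to the backing AddressSpace and DPA offset. The device's
 * DPA space is ordered volatile memory first, then persistent memory, then
 * the dynamic-capacity regions; DC accesses must land on blocks backed by
 * accepted extents.
 */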
1134 static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
1135                                        hwaddr host_addr,
1136                                        unsigned int size,
1137                                        AddressSpace **as,
1138                                        uint64_t *dpa_offset)
1139 {
1140     MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
1141     uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
1142 
1143     if (ct3d->hostvmem) {
1144         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
1145         vmr_size = memory_region_size(vmr);
1146     }
1147     if (ct3d->hostpmem) {
1148         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
1149         pmr_size = memory_region_size(pmr);
1150     }
1151     if (ct3d->dc.host_dc) {
1152         dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
1153         dc_size = memory_region_size(dc_mr);
1154     }
1155 
1156     if (!vmr && !pmr && !dc_mr) {
1157         return -ENODEV;
1158     }
1159 
1160     if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
1161         return -EINVAL;
1162     }
1163 
1164     if (*dpa_offset >= vmr_size + pmr_size + dc_size) {
1165         return -EINVAL;
1166     }
1167 
1168     if (*dpa_offset < vmr_size) {
1169         *as = &ct3d->hostvmem_as;
1170     } else if (*dpa_offset < vmr_size + pmr_size) {
1171         *as = &ct3d->hostpmem_as;
1172         *dpa_offset -= vmr_size;
1173     } else {
1174         if (!ct3_test_region_block_backed(ct3d, *dpa_offset, size)) {
1175             return -ENODEV;
1176         }
1177 
1178         *as = &ct3d->dc.host_dc_as;
1179         *dpa_offset -= (vmr_size + pmr_size);
1180     }
1181 
1182     return 0;
1183 }
1184 
1185 MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
1186                            unsigned size, MemTxAttrs attrs)
1187 {
1188     CXLType3Dev *ct3d = CXL_TYPE3(d);
1189     uint64_t dpa_offset = 0;
1190     AddressSpace *as = NULL;
1191     int res;
1192 
1193     res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
1194                                       &as, &dpa_offset);
1195     if (res) {
1196         return MEMTX_ERROR;
1197     }
1198 
1199     if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
1200         qemu_guest_getrandom_nofail(data, size);
1201         return MEMTX_OK;
1202     }
1203 
1204     return address_space_read(as, dpa_offset, attrs, data, size);
1205 }
1206 
1207 MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
1208                             unsigned size, MemTxAttrs attrs)
1209 {
1210     CXLType3Dev *ct3d = CXL_TYPE3(d);
1211     uint64_t dpa_offset = 0;
1212     AddressSpace *as = NULL;
1213     int res;
1214 
1215     res = cxl_type3_hpa_to_as_and_dpa(ct3d, host_addr, size,
1216                                       &as, &dpa_offset);
1217     if (res) {
1218         return MEMTX_ERROR;
1219     }
1220 
1221     if (cxl_dev_media_disabled(&ct3d->cxl_dstate)) {
1222         return MEMTX_OK;
1223     }
1224 
1225     return address_space_write(as, dpa_offset, attrs, &data, size);
1226 }
1227 
1228 static void ct3d_reset(DeviceState *dev)
1229 {
1230     CXLType3Dev *ct3d = CXL_TYPE3(dev);
1231     uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
1232     uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;
1233 
1234     pcie_cap_fill_link_ep_usp(PCI_DEVICE(dev), ct3d->width, ct3d->speed);
1235     cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
1236     cxl_device_register_init_t3(ct3d, CXL_T3_MSIX_MBOX);
1237 
1238     /*
1239      * Bring up an endpoint to target with MCTP over VDM.
1240      * This device is emulating an MLD with single LD for now.
1241      */
1242     if (ct3d->vdm_fm_owned_ld_mctp_cci.initialized) {
1243         cxl_destroy_cci(&ct3d->vdm_fm_owned_ld_mctp_cci);
1244     }
1245     cxl_initialize_t3_fm_owned_ld_mctpcci(&ct3d->vdm_fm_owned_ld_mctp_cci,
1246                                           DEVICE(ct3d), DEVICE(ct3d),
1247                                           512); /* Max payload made up */
1248     if (ct3d->ld0_cci.initialized) {
1249         cxl_destroy_cci(&ct3d->ld0_cci);
1250     }
1251     cxl_initialize_t3_ld_cci(&ct3d->ld0_cci, DEVICE(ct3d), DEVICE(ct3d),
1252                              512); /* Max payload made up */
1253 }
1254 
1255 static const Property ct3_props[] = {
1256     DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
1257                      HostMemoryBackend *), /* for backward compatibility */
1258     DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
1259                      TYPE_MEMORY_BACKEND, HostMemoryBackend *),
1260     DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
1261                      TYPE_MEMORY_BACKEND, HostMemoryBackend *),
1262     DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
1263                      HostMemoryBackend *),
1264     DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
1265     DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
1266     DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
1267     DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
1268                      TYPE_MEMORY_BACKEND, HostMemoryBackend *),
1269     DEFINE_PROP_PCIE_LINK_SPEED("x-speed", CXLType3Dev,
1270                                 speed, PCIE_LINK_SPEED_32),
1271     DEFINE_PROP_PCIE_LINK_WIDTH("x-width", CXLType3Dev,
1272                                 width, PCIE_LINK_WIDTH_16),
1273 };
1274 
1275 static uint64_t get_lsa_size(CXLType3Dev *ct3d)
1276 {
1277     MemoryRegion *mr;
1278 
1279     if (!ct3d->lsa) {
1280         return 0;
1281     }
1282 
1283     mr = host_memory_backend_get_memory(ct3d->lsa);
1284     return memory_region_size(mr);
1285 }
1286 
1287 static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
1288                                 uint64_t offset)
1289 {
1290     assert(offset + size <= memory_region_size(mr));
1291     assert(offset + size > offset);
1292 }
1293 
1294 static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
1295                     uint64_t offset)
1296 {
1297     MemoryRegion *mr;
1298     void *lsa;
1299 
1300     if (!ct3d->lsa) {
1301         return 0;
1302     }
1303 
1304     mr = host_memory_backend_get_memory(ct3d->lsa);
1305     validate_lsa_access(mr, size, offset);
1306 
1307     lsa = memory_region_get_ram_ptr(mr) + offset;
1308     memcpy(buf, lsa, size);
1309 
1310     return size;
1311 }
1312 
1313 static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
1314                     uint64_t offset)
1315 {
1316     MemoryRegion *mr;
1317     void *lsa;
1318 
1319     if (!ct3d->lsa) {
1320         return;
1321     }
1322 
1323     mr = host_memory_backend_get_memory(ct3d->lsa);
1324     validate_lsa_access(mr, size, offset);
1325 
1326     lsa = memory_region_get_ram_ptr(mr) + offset;
1327     memcpy(lsa, buf, size);
1328     memory_region_set_dirty(mr, offset, size);
1329 
1330     /*
1331      * Just like the PMEM, if the guest is not allowed to exit gracefully, label
1332      * updates will get lost.
1333      */
1334 }
1335 
1336 static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
1337 {
1338     MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
1339     AddressSpace *as;
1340     uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
1341 
1342     if (ct3d->hostvmem) {
1343         vmr = host_memory_backend_get_memory(ct3d->hostvmem);
1344         vmr_size = memory_region_size(vmr);
1345     }
1346     if (ct3d->hostpmem) {
1347         pmr = host_memory_backend_get_memory(ct3d->hostpmem);
1348         pmr_size = memory_region_size(pmr);
1349     }
1350     if (ct3d->dc.host_dc) {
1351         dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
1352         dc_size = memory_region_size(dc_mr);
1353      }
1354 
1355     if (!vmr && !pmr && !dc_mr) {
1356         return false;
1357     }
1358 
1359     if (dpa_offset + CXL_CACHE_LINE_SIZE > vmr_size + pmr_size + dc_size) {
1360         return false;
1361     }
1362 
1363     if (dpa_offset < vmr_size) {
1364         as = &ct3d->hostvmem_as;
1365     } else if (dpa_offset < vmr_size + pmr_size) {
1366         as = &ct3d->hostpmem_as;
1367         dpa_offset -= vmr_size;
1368     } else {
1369         as = &ct3d->dc.host_dc_as;
1370         dpa_offset -= (vmr_size + pmr_size);
1371     }
1372 
1373     address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
1374                         CXL_CACHE_LINE_SIZE);
1375     return true;
1376 }
1377 
1378 void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
1379 {
1380         ct3d->poison_list_overflowed = true;
1381         ct3d->poison_list_overflow_ts =
1382             cxl_device_get_timestamp(&ct3d->cxl_dstate);
1383 }
1384 
1385 void cxl_clear_poison_list_overflowed(CXLType3Dev *ct3d)
1386 {
1387     ct3d->poison_list_overflowed = false;
1388     ct3d->poison_list_overflow_ts = 0;
1389 }
1390 
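/*
 * Backs the QMP command "cxl-inject-poison". A hypothetical invocation
 * (the device path below is illustrative only):
 *   { "execute": "cxl-inject-poison",
 *     "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                    "start": 2048, "length": 256 } }
 */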
1391 void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
1392                            Error **errp)
1393 {
1394     Object *obj = object_resolve_path(path, NULL);
1395     CXLType3Dev *ct3d;
1396     CXLPoison *p;
1397 
1398     if (length % 64) {
1399         error_setg(errp, "Poison injection must be in multiples of 64 bytes");
1400         return;
1401     }
1402     if (start % 64) {
1403         error_setg(errp, "Poison start address must be 64 byte aligned");
1404         return;
1405     }
1406     if (!obj) {
1407         error_setg(errp, "Unable to resolve path");
1408         return;
1409     }
1410     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1411         error_setg(errp, "Path does not point to a CXL type 3 device");
1412         return;
1413     }
1414 
1415     ct3d = CXL_TYPE3(obj);
1416 
1417     QLIST_FOREACH(p, &ct3d->poison_list, node) {
1418         if ((start < p->start + p->length) && (start + length > p->start)) {
1419             error_setg(errp,
1420                        "Overlap with existing poisoned region not supported");
1421             return;
1422         }
1423     }
1424 
1425     p = g_new0(CXLPoison, 1);
1426     p->length = length;
1427     p->start = start;
1428     /* Different from injected via the mbox */
1429     p->type = CXL_POISON_TYPE_INTERNAL;
1430 
1431     if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
1432         QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
1433         ct3d->poison_list_cnt++;
1434     } else {
1435         if (!ct3d->poison_list_overflowed) {
1436             cxl_set_poison_list_overflowed(ct3d);
1437         }
1438         QLIST_INSERT_HEAD(&ct3d->poison_list_bkp, p, node);
1439     }
1440 }
1441 
1442 /* For uncorrectable errors include support for multiple header recording */
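/*
 * Backs the QMP command "cxl-inject-uncorrectable-errors"; each list entry
 * carries a CxlUncorErrorType plus up to 32 header DWORDs. A hypothetical
 * invocation (device path and values illustrative only):
 *   { "execute": "cxl-inject-uncorrectable-errors",
 *     "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                    "errors": [ { "type": "cache-data-parity",
 *                                  "header": [ 0, 1, 2 ] } ] } }
 */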
1443 void qmp_cxl_inject_uncorrectable_errors(const char *path,
1444                                          CXLUncorErrorRecordList *errors,
1445                                          Error **errp)
1446 {
1447     Object *obj = object_resolve_path(path, NULL);
1448     static PCIEAERErr err = {};
1449     CXLType3Dev *ct3d;
1450     CXLError *cxl_err;
1451     uint32_t *reg_state;
1452     uint32_t unc_err;
1453     bool first;
1454 
1455     if (!obj) {
1456         error_setg(errp, "Unable to resolve path");
1457         return;
1458     }
1459 
1460     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1461         error_setg(errp, "Path does not point to a CXL type 3 device");
1462         return;
1463     }
1464 
1465     err.status = PCI_ERR_UNC_INTN;
1466     err.source_id = pci_requester_id(PCI_DEVICE(obj));
1467     err.flags = 0;
1468 
1469     ct3d = CXL_TYPE3(obj);
1470 
1471     first = QTAILQ_EMPTY(&ct3d->error_list);
1472     reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
1473     while (errors) {
1474         uint32List *header = errors->value->header;
1475         uint8_t header_count = 0;
1476         int cxl_err_code;
1477 
1478         cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
1479         if (cxl_err_code < 0) {
1480             error_setg(errp, "Unknown error code");
1481             return;
1482         }
1483 
1484         /* If the error is masked, nothing to do here */
1485         if (!((1 << cxl_err_code) &
1486               ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
1487             errors = errors->next;
1488             continue;
1489         }
1490 
1491         cxl_err = g_malloc0(sizeof(*cxl_err));
1492 
1493         cxl_err->type = cxl_err_code;
1494         while (header && header_count < 32) {
1495             cxl_err->header[header_count++] = header->value;
1496             header = header->next;
1497         }
1498         if (header) { /* more than 32 header DWORDs were supplied */
1499             error_setg(errp, "Header must be 32 DWORD or less");
                 g_free(cxl_err);
1500             return;
1501         }
1502         QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);
1503 
1504         errors = errors->next;
1505     }
1506 
1507     if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
1508         uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
1509         uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
1510         uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
1511         int i;
1512 
1513         cxl_err = QTAILQ_FIRST(&ct3d->error_list);
1514         for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
1515             stl_le_p(header_log + i, cxl_err->header[i]);
1516         }
1517 
1518         capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
1519                              FIRST_ERROR_POINTER, cxl_err->type);
1520         stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
1521     }
1522 
1523     unc_err = 0;
1524     QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
1525         unc_err |= (1 << cxl_err->type);
1526     }
1527     if (!unc_err) {
1528         return;
1529     }
1530 
1531     stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
1532     pcie_aer_inject_error(PCI_DEVICE(obj), &err);
1533 }
1534 
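/*
 * Correctable error injection: map the QMP error type to a CXL RAS
 * correctable status bit, skip it if masked in CXL_RAS_COR_ERR_MASK, set the
 * bit in CXL_RAS_COR_ERR_STATUS and signal a correctable internal error via
 * PCIe AER.
 */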
1535 void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
1536                                       Error **errp)
1537 {
1538     static PCIEAERErr err = {};
1539     Object *obj = object_resolve_path(path, NULL);
1540     CXLType3Dev *ct3d;
1541     uint32_t *reg_state;
1542     uint32_t cor_err;
1543     int cxl_err_type;
1544 
1545     if (!obj) {
1546         error_setg(errp, "Unable to resolve path");
1547         return;
1548     }
1549     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1550         error_setg(errp, "Path does not point to a CXL type 3 device");
1551         return;
1552     }
1553 
1554     err.status = PCI_ERR_COR_INTERNAL;
1555     err.source_id = pci_requester_id(PCI_DEVICE(obj));
1556     err.flags = PCIE_AER_ERR_IS_CORRECTABLE;
1557 
1558     ct3d = CXL_TYPE3(obj);
1559     reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
1560     cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);
1561 
1562     cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
1563     if (cxl_err_type < 0) {
1564         error_setg(errp, "Invalid COR error");
1565         return;
1566     }
1567     /* If the error is masked, nothing to do here */
1568     if (!((1 << cxl_err_type) &
1569           ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
1570         return;
1571     }
1572 
1573     cor_err |= (1 << cxl_err_type);
1574     stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);
1575 
1576     pcie_aer_inject_error(PCI_DEVICE(obj), &err);
1577 }
1578 
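/*
 * Fill the Common Event Record header shared by all injected event records:
 * a 3-byte little-endian flags field, the record length, the per-record-type
 * UUID and the device timestamp.
 */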
1579 static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
1580                                     const QemuUUID *uuid, uint32_t flags,
1581                                     uint8_t length, uint64_t timestamp)
1582 {
1583     st24_le_p(&hdr->flags, flags);
1584     hdr->length = length;
1585     memcpy(&hdr->id, uuid, sizeof(hdr->id));
1586     stq_le_p(&hdr->timestamp, timestamp);
1587 }
1588 
1589 static const QemuUUID gen_media_uuid = {
1590     .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
1591                  0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
1592 };
1593 
1594 static const QemuUUID dram_uuid = {
1595     .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
1596                  0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
1597 };
1598 
1599 static const QemuUUID memory_module_uuid = {
1600     .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
1601                  0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
1602 };
1603 
1604 #define CXL_GMER_VALID_CHANNEL                          BIT(0)
1605 #define CXL_GMER_VALID_RANK                             BIT(1)
1606 #define CXL_GMER_VALID_DEVICE                           BIT(2)
1607 #define CXL_GMER_VALID_COMPONENT                        BIT(3)
1608 
1609 static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
1610 {
1611     switch (log) {
1612     case CXL_EVENT_LOG_INFORMATIONAL:
1613         return CXL_EVENT_TYPE_INFO;
1614     case CXL_EVENT_LOG_WARNING:
1615         return CXL_EVENT_TYPE_WARN;
1616     case CXL_EVENT_LOG_FAILURE:
1617         return CXL_EVENT_TYPE_FAIL;
1618     case CXL_EVENT_LOG_FATAL:
1619         return CXL_EVENT_TYPE_FATAL;
1620     default:
1621         return -EINVAL;
1622     }
1623 }
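
/*
 * General media event injection: optional arguments (channel, rank, device,
 * component-id) only populate the corresponding record fields and set the
 * matching CXL_GMER_VALID_* bits in validity_flags.
 *
 * Illustrative QMP usage (a sketch; wire command and argument names are
 * defined by the QAPI schema, not in this file):
 *
 *   { "execute": "cxl-inject-general-media-event",
 *     "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                    "log": "informational", "flags": 1, "dpa": 4096,
 *                    "descriptor": 3, "type": 2, "transaction-type": 192,
 *                    "channel": 1 } }
 */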
1624 /* Component ID is device specific.  Define this as a string. */
1625 void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
1626                                         uint8_t flags, uint64_t dpa,
1627                                         uint8_t descriptor, uint8_t type,
1628                                         uint8_t transaction_type,
1629                                         bool has_channel, uint8_t channel,
1630                                         bool has_rank, uint8_t rank,
1631                                         bool has_device, uint32_t device,
1632                                         const char *component_id,
1633                                         Error **errp)
1634 {
1635     Object *obj = object_resolve_path(path, NULL);
1636     CXLEventGenMedia gem;
1637     CXLEventRecordHdr *hdr = &gem.hdr;
1638     CXLDeviceState *cxlds;
1639     CXLType3Dev *ct3d;
1640     uint16_t valid_flags = 0;
1641     uint8_t enc_log;
1642     int rc;
1643 
1644     if (!obj) {
1645         error_setg(errp, "Unable to resolve path");
1646         return;
1647     }
1648     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1649         error_setg(errp, "Path does not point to a CXL type 3 device");
1650         return;
1651     }
1652     ct3d = CXL_TYPE3(obj);
1653     cxlds = &ct3d->cxl_dstate;
1654 
1655     rc = ct3d_qmp_cxl_event_log_enc(log);
1656     if (rc < 0) {
1657         error_setg(errp, "Unhandled error log type");
1658         return;
1659     }
1660     enc_log = rc;
1661 
1662     memset(&gem, 0, sizeof(gem));
1663     cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
1664                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1665 
1666     stq_le_p(&gem.phys_addr, dpa);
1667     gem.descriptor = descriptor;
1668     gem.type = type;
1669     gem.transaction_type = transaction_type;
1670 
1671     if (has_channel) {
1672         gem.channel = channel;
1673         valid_flags |= CXL_GMER_VALID_CHANNEL;
1674     }
1675 
1676     if (has_rank) {
1677         gem.rank = rank;
1678         valid_flags |= CXL_GMER_VALID_RANK;
1679     }
1680 
1681     if (has_device) {
1682         st24_le_p(gem.device, device);
1683         valid_flags |= CXL_GMER_VALID_DEVICE;
1684     }
1685 
1686     if (component_id) {
1687         strncpy((char *)gem.component_id, component_id,
1688                 sizeof(gem.component_id) - 1);
1689         valid_flags |= CXL_GMER_VALID_COMPONENT;
1690     }
1691 
1692     stw_le_p(&gem.validity_flags, valid_flags);
1693 
1694     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
1695         cxl_event_irq_assert(ct3d);
1696     }
1697 }
1698 
1699 #define CXL_DRAM_VALID_CHANNEL                          BIT(0)
1700 #define CXL_DRAM_VALID_RANK                             BIT(1)
1701 #define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
1702 #define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
1703 #define CXL_DRAM_VALID_BANK                             BIT(4)
1704 #define CXL_DRAM_VALID_ROW                              BIT(5)
1705 #define CXL_DRAM_VALID_COLUMN                           BIT(6)
1706 #define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)
1707 
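/*
 * DRAM event injection mirrors the general media event above, with additional
 * optional DRAM topology fields (nibble mask, bank group, bank, row, column,
 * correction mask); each supplied field sets the matching CXL_DRAM_VALID_*
 * bit.  Only the first four correction-mask entries are recorded.
 */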
1708 void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
1709                                uint64_t dpa, uint8_t descriptor,
1710                                uint8_t type, uint8_t transaction_type,
1711                                bool has_channel, uint8_t channel,
1712                                bool has_rank, uint8_t rank,
1713                                bool has_nibble_mask, uint32_t nibble_mask,
1714                                bool has_bank_group, uint8_t bank_group,
1715                                bool has_bank, uint8_t bank,
1716                                bool has_row, uint32_t row,
1717                                bool has_column, uint16_t column,
1718                                bool has_correction_mask,
1719                                uint64List *correction_mask,
1720                                Error **errp)
1721 {
1722     Object *obj = object_resolve_path(path, NULL);
1723     CXLEventDram dram;
1724     CXLEventRecordHdr *hdr = &dram.hdr;
1725     CXLDeviceState *cxlds;
1726     CXLType3Dev *ct3d;
1727     uint16_t valid_flags = 0;
1728     uint8_t enc_log;
1729     int rc;
1730 
1731     if (!obj) {
1732         error_setg(errp, "Unable to resolve path");
1733         return;
1734     }
1735     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1736         error_setg(errp, "Path does not point to a CXL type 3 device");
1737         return;
1738     }
1739     ct3d = CXL_TYPE3(obj);
1740     cxlds = &ct3d->cxl_dstate;
1741 
1742     rc = ct3d_qmp_cxl_event_log_enc(log);
1743     if (rc < 0) {
1744         error_setg(errp, "Unhandled error log type");
1745         return;
1746     }
1747     enc_log = rc;
1748 
1749     memset(&dram, 0, sizeof(dram));
1750     cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
1751                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1752     stq_le_p(&dram.phys_addr, dpa);
1753     dram.descriptor = descriptor;
1754     dram.type = type;
1755     dram.transaction_type = transaction_type;
1756 
1757     if (has_channel) {
1758         dram.channel = channel;
1759         valid_flags |= CXL_DRAM_VALID_CHANNEL;
1760     }
1761 
1762     if (has_rank) {
1763         dram.rank = rank;
1764         valid_flags |= CXL_DRAM_VALID_RANK;
1765     }
1766 
1767     if (has_nibble_mask) {
1768         st24_le_p(dram.nibble_mask, nibble_mask);
1769         valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
1770     }
1771 
1772     if (has_bank_group) {
1773         dram.bank_group = bank_group;
1774         valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
1775     }
1776 
1777     if (has_bank) {
1778         dram.bank = bank;
1779         valid_flags |= CXL_DRAM_VALID_BANK;
1780     }
1781 
1782     if (has_row) {
1783         st24_le_p(dram.row, row);
1784         valid_flags |= CXL_DRAM_VALID_ROW;
1785     }
1786 
1787     if (has_column) {
1788         stw_le_p(&dram.column, column);
1789         valid_flags |= CXL_DRAM_VALID_COLUMN;
1790     }
1791 
1792     if (has_correction_mask) {
1793         int count = 0;
1794         while (correction_mask && count < 4) {
1795             stq_le_p(&dram.correction_mask[count],
1796                      correction_mask->value);
1797             count++;
1798             correction_mask = correction_mask->next;
1799         }
1800         valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
1801     }
1802 
1803     stw_le_p(&dram.validity_flags, valid_flags);
1804 
1805     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
1806         cxl_event_irq_assert(ct3d);
1807     }
1808 }
1809 
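/*
 * Memory module event injection: all fields are mandatory, so no validity
 * flags are involved; the health-related fields are stored little-endian
 * directly into the Memory Module Event Record.
 */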
1810 void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
1811                                         uint8_t flags, uint8_t type,
1812                                         uint8_t health_status,
1813                                         uint8_t media_status,
1814                                         uint8_t additional_status,
1815                                         uint8_t life_used,
1816                                         int16_t temperature,
1817                                         uint32_t dirty_shutdown_count,
1818                                         uint32_t corrected_volatile_error_count,
1819                                         uint32_t corrected_persist_error_count,
1820                                         Error **errp)
1821 {
1822     Object *obj = object_resolve_path(path, NULL);
1823     CXLEventMemoryModule module;
1824     CXLEventRecordHdr *hdr = &module.hdr;
1825     CXLDeviceState *cxlds;
1826     CXLType3Dev *ct3d;
1827     uint8_t enc_log;
1828     int rc;
1829 
1830     if (!obj) {
1831         error_setg(errp, "Unable to resolve path");
1832         return;
1833     }
1834     if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
1835         error_setg(errp, "Path does not point to a CXL type 3 device");
1836         return;
1837     }
1838     ct3d = CXL_TYPE3(obj);
1839     cxlds = &ct3d->cxl_dstate;
1840 
1841     rc = ct3d_qmp_cxl_event_log_enc(log);
1842     if (rc < 0) {
1843         error_setg(errp, "Unhandled error log type");
1844         return;
1845     }
1846     enc_log = rc;
1847 
1848     memset(&module, 0, sizeof(module));
1849     cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
1850                             cxl_device_get_timestamp(&ct3d->cxl_dstate));
1851 
1852     module.type = type;
1853     module.health_status = health_status;
1854     module.media_status = media_status;
1855     module.additional_status = additional_status;
1856     module.life_used = life_used;
1857     stw_le_p(&module.temperature, temperature);
1858     stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
1859     stl_le_p(&module.corrected_volatile_error_count,
1860              corrected_volatile_error_count);
1861     stl_le_p(&module.corrected_persistent_error_count,
1862              corrected_persist_error_count);
1863 
1864     if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
1865         cxl_event_irq_assert(ct3d);
1866     }
1867 }
1868 
1869 /* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */
1870 static const QemuUUID dynamic_capacity_uuid = {
1871     .data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f,
1872                  0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a),
1873 };
1874 
1875 typedef enum CXLDCEventType {
1876     DC_EVENT_ADD_CAPACITY = 0x0,
1877     DC_EVENT_RELEASE_CAPACITY = 0x1,
1878     DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2,
1879     DC_EVENT_REGION_CONFIG_UPDATED = 0x3,
1880     DC_EVENT_ADD_CAPACITY_RSP = 0x4,
1881     DC_EVENT_CAPACITY_RELEASED = 0x5,
1882 } CXLDCEventType;
1883 
1884 /*
1885  * Check whether the range [dpa, dpa + len - 1] overlaps with any extent in
1886  * the list.
1887  * Return value: true if there is an overlap; otherwise, false.
1888  */
1889 static bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
1890                                            uint64_t dpa, uint64_t len)
1891 {
1892     CXLDCExtent *ent;
1893     Range range1, range2;
1894 
1895     if (!list) {
1896         return false;
1897     }
1898 
1899     range_init_nofail(&range1, dpa, len);
1900     QTAILQ_FOREACH(ent, list, node) {
1901         range_init_nofail(&range2, ent->start_dpa, ent->len);
1902         if (range_overlaps_range(&range1, &range2)) {
1903             return true;
1904         }
1905     }
1906     return false;
1907 }
1908 
1909 /*
1910  * Check whether the range [dpa, dpa + len - 1] is contained by an extent in
1911  * the list.
1912  * Multi-extent containment will be checked once superset release is added.
1913  * Return value: true if the range is contained; otherwise, false.
1914  */
1915 bool cxl_extents_contains_dpa_range(CXLDCExtentList *list,
1916                                     uint64_t dpa, uint64_t len)
1917 {
1918     CXLDCExtent *ent;
1919     Range range1, range2;
1920 
1921     if (!list) {
1922         return false;
1923     }
1924 
1925     range_init_nofail(&range1, dpa, len);
1926     QTAILQ_FOREACH(ent, list, node) {
1927         range_init_nofail(&range2, ent->start_dpa, ent->len);
1928         if (range_contains_range(&range2, &range1)) {
1929             return true;
1930         }
1931     }
1932     return false;
1933 }
1934 
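/*
 * Check whether the range [dpa, dpa + len - 1] overlaps with any extent in
 * any group on the list.
 * Return value: true if there is an overlap; otherwise, false.
 */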
1935 static bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
1936                                                  uint64_t dpa, uint64_t len)
1937 {
1938     CXLDCExtentGroup *group;
1939 
1940     if (!list) {
1941         return false;
1942     }
1943 
1944     QTAILQ_FOREACH(group, list, node) {
1945         if (cxl_extents_overlaps_dpa_range(&group->list, dpa, len)) {
1946             return true;
1947         }
1948     }
1949     return false;
1950 }
1951 
1952 /*
1953  * The main function to process a dynamic capacity event with an extent list.
1954  * Currently, DC extent add and release requests are processed.
1955  */
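/*
 * Two passes are made over the QMP extent list:
 *  1. validate every extent (non-zero length, block-size alignment, within
 *     the region, no duplicates or overlaps tracked via a block bitmap, plus
 *     add/release specific checks against the accepted and pending extents);
 *  2. build the raw extent array, collect added extents into a pending
 *     extent group, and emit one Dynamic Capacity event record per extent,
 *     setting the "More" flag on all but the last record.
 */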
1956 static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
1957         uint16_t hid, CXLDCEventType type, uint8_t rid,
1958         CxlDynamicCapacityExtentList *records, Error **errp)
1959 {
1960     Object *obj;
1961     CXLEventDynamicCapacity dCap = {};
1962     CXLEventRecordHdr *hdr = &dCap.hdr;
1963     CXLType3Dev *dcd;
1964     uint8_t flags = 1 << CXL_EVENT_TYPE_INFO;
1965     uint32_t num_extents = 0;
1966     CxlDynamicCapacityExtentList *list;
1967     CXLDCExtentGroup *group = NULL;
1968     g_autofree CXLDCExtentRaw *extents = NULL;
1969     uint8_t enc_log = CXL_EVENT_TYPE_DYNAMIC_CAP;
1970     uint64_t dpa, offset, len, block_size;
1971     g_autofree unsigned long *blk_bitmap = NULL;
1972     int i;
1973 
1974     obj = object_resolve_path_type(path, TYPE_CXL_TYPE3, NULL);
1975     if (!obj) {
1976         error_setg(errp, "Unable to resolve CXL type 3 device");
1977         return;
1978     }
1979 
1980     dcd = CXL_TYPE3(obj);
1981     if (!dcd->dc.num_regions) {
1982         error_setg(errp, "No dynamic capacity support from the device");
1983         return;
1984     }
1985
1987     if (rid >= dcd->dc.num_regions) {
1988         error_setg(errp, "region id is too large");
1989         return;
1990     }
1991     block_size = dcd->dc.regions[rid].block_size;
1992     blk_bitmap = bitmap_new(dcd->dc.regions[rid].len / block_size);
1993 
1994     /* Sanity check and count the extents */
1995     list = records;
1996     while (list) {
1997         offset = list->value->offset;
1998         len = list->value->len;
1999         dpa = offset + dcd->dc.regions[rid].base;
2000 
2001         if (len == 0) {
2002             error_setg(errp, "extent with 0 length is not allowed");
2003             return;
2004         }
2005 
2006         if (offset % block_size || len % block_size) {
2007             error_setg(errp, "dpa or len is not aligned to region block size");
2008             return;
2009         }
2010 
2011         if (offset + len > dcd->dc.regions[rid].len) {
2012             error_setg(errp, "extent range is beyond the region end");
2013             return;
2014         }
2015 
2016         /* No duplicate or overlapped extents are allowed */
2017         if (test_any_bits_set(blk_bitmap, offset / block_size,
2018                               len / block_size)) {
2019             error_setg(errp, "duplicate or overlapped extents are detected");
2020             return;
2021         }
2022         bitmap_set(blk_bitmap, offset / block_size, len / block_size);
2023 
2024         if (type == DC_EVENT_RELEASE_CAPACITY) {
2025             if (cxl_extent_groups_overlaps_dpa_range(&dcd->dc.extents_pending,
2026                                                      dpa, len)) {
2027                 error_setg(errp,
2028                            "cannot release extent with pending DPA range");
2029                 return;
2030             }
2031             if (!ct3_test_region_block_backed(dcd, dpa, len)) {
2032                 error_setg(errp,
2033                            "cannot release extent with non-existing DPA range");
2034                 return;
2035             }
2036         } else if (type == DC_EVENT_ADD_CAPACITY) {
2037             if (cxl_extents_overlaps_dpa_range(&dcd->dc.extents, dpa, len)) {
2038                 error_setg(errp,
2039                            "cannot add DPA already accessible to the same LD");
2040                 return;
2041             }
2042             if (cxl_extent_groups_overlaps_dpa_range(&dcd->dc.extents_pending,
2043                                                      dpa, len)) {
2044                 error_setg(errp,
2045                            "cannot add DPA again while still pending");
2046                 return;
2047             }
2048         }
2049         list = list->next;
2050         num_extents++;
2051     }
2052 
2053     /* Create extent list for event being passed to host */
2054     i = 0;
2055     list = records;
2056     extents = g_new0(CXLDCExtentRaw, num_extents);
2057     while (list) {
2058         offset = list->value->offset;
2059         len = list->value->len;
2060         dpa = dcd->dc.regions[rid].base + offset;
2061 
2062         extents[i].start_dpa = dpa;
2063         extents[i].len = len;
2064         memset(extents[i].tag, 0, 0x10);
2065         extents[i].shared_seq = 0;
2066         if (type == DC_EVENT_ADD_CAPACITY) {
2067             group = cxl_insert_extent_to_extent_group(group,
2068                                                       extents[i].start_dpa,
2069                                                       extents[i].len,
2070                                                       extents[i].tag,
2071                                                       extents[i].shared_seq);
2072         }
2073 
2074         list = list->next;
2075         i++;
2076     }
2077     if (group) {
2078         cxl_extent_group_list_insert_tail(&dcd->dc.extents_pending, group);
2079     }
2080 
2081     /*
2082      * CXL r3.1 section 8.2.9.2.1.6: Dynamic Capacity Event Record
2083      *
2084      * All Dynamic Capacity event records shall set the Event Record Severity
2085      * field in the Common Event Record Format to Informational Event. All
2086      * Dynamic Capacity related events shall be logged in the Dynamic Capacity
2087      * Event Log.
2088      */
2089     cxl_assign_event_header(hdr, &dynamic_capacity_uuid, flags, sizeof(dCap),
2090                             cxl_device_get_timestamp(&dcd->cxl_dstate));
2091 
2092     dCap.type = type;
2093     /* FIXME: for now, validity flag is cleared */
2094     dCap.validity_flags = 0;
2095     stw_le_p(&dCap.host_id, hid);
2096     /* only valid for DC_REGION_CONFIG_UPDATED event */
2097     dCap.updated_region_id = 0;
2098     for (i = 0; i < num_extents; i++) {
2099         memcpy(&dCap.dynamic_capacity_extent, &extents[i],
2100                sizeof(CXLDCExtentRaw));
2101 
2102         dCap.flags = 0;
2103         if (i < num_extents - 1) {
2104             /* Set "More" flag */
2105             dCap.flags |= BIT(0);
2106         }
2107 
2108         if (cxl_event_insert(&dcd->cxl_dstate, enc_log,
2109                              (CXLEventRecordRaw *)&dCap)) {
2110             cxl_event_irq_assert(dcd);
2111         }
2112     }
2113 }
2114 
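/*
 * Illustrative QMP usage for adding dynamic capacity (a sketch; wire command
 * and argument names are defined by the QAPI schema, and only the
 * prescriptive selection policy is accepted below):
 *
 *   { "execute": "cxl-add-dynamic-capacity",
 *     "arguments": { "path": "/machine/peripheral/cxl-dcd0", "host-id": 0,
 *                    "selection-policy": "prescriptive", "region": 0,
 *                    "tag": "",
 *                    "extents": [ { "offset": 0, "len": 134217728 } ] } }
 */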
2115 void qmp_cxl_add_dynamic_capacity(const char *path, uint16_t host_id,
2116                                   CxlExtentSelectionPolicy sel_policy,
2117                                   uint8_t region, const char *tag,
2118                                   CxlDynamicCapacityExtentList  *extents,
2119                                   Error **errp)
2120 {
2121     switch (sel_policy) {
2122     case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE:
2123         qmp_cxl_process_dynamic_capacity_prescriptive(path, host_id,
2124                                                       DC_EVENT_ADD_CAPACITY,
2125                                                       region, extents, errp);
2126         return;
2127     default:
2128         error_setg(errp, "Selection policy not supported");
2129         return;
2130     }
2131 }
2132 
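/*
 * Release of dynamic capacity follows the same prescriptive path as add;
 * forced removal is recognised but currently rejected, and the
 * sanitize-on-release flag is not acted upon here.
 */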
2133 void qmp_cxl_release_dynamic_capacity(const char *path, uint16_t host_id,
2134                                       CxlExtentRemovalPolicy removal_policy,
2135                                       bool has_forced_removal,
2136                                       bool forced_removal,
2137                                       bool has_sanitize_on_release,
2138                                       bool sanitize_on_release,
2139                                       uint8_t region,
2140                                       const char *tag,
2141                                       CxlDynamicCapacityExtentList  *extents,
2142                                       Error **errp)
2143 {
2144     CXLDCEventType type = DC_EVENT_RELEASE_CAPACITY;
2145 
2146     if (has_forced_removal && forced_removal) {
2147         /* TODO: enable forced removal in the future */
2148         type = DC_EVENT_FORCED_RELEASE_CAPACITY;
2149         error_setg(errp, "Forced removal not supported yet");
2150         return;
2151     }
2152 
2153     switch (removal_policy) {
2154     case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE:
2155         qmp_cxl_process_dynamic_capacity_prescriptive(path, host_id, type,
2156                                                       region, extents, errp);
2157         return;
2158     default:
2159         error_setg(errp, "Removal policy not supported");
2160         return;
2161     }
2162 }
2163 
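/*
 * QOM boilerplate: ct3_class_init wires up the PCI realize/exit/config-space
 * hooks and the CXL Type 3 class callbacks (get_lsa_size, get_lsa, set_lsa,
 * set_cacheline).
 *
 * Illustrative command line (a sketch; property names are defined by
 * ct3_props elsewhere in this file and are assumptions here):
 *
 *   -object memory-backend-ram,id=vmem0,size=256M,share=on \
 *   -device cxl-type3,bus=root_port13,volatile-memdev=vmem0,id=cxl-mem0
 */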
2164 static void ct3_class_init(ObjectClass *oc, const void *data)
2165 {
2166     DeviceClass *dc = DEVICE_CLASS(oc);
2167     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
2168     CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);
2169 
2170     pc->realize = ct3_realize;
2171     pc->exit = ct3_exit;
2172     pc->class_id = PCI_CLASS_MEMORY_CXL;
2173     pc->vendor_id = PCI_VENDOR_ID_INTEL;
2174     pc->device_id = 0xd93; /* LVF for now */
2175     pc->revision = 1;
2176 
2177     pc->config_write = ct3d_config_write;
2178     pc->config_read = ct3d_config_read;
2179 
2180     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2181     dc->desc = "CXL Memory Device (Type 3)";
2182     device_class_set_legacy_reset(dc, ct3d_reset);
2183     device_class_set_props(dc, ct3_props);
2184 
2185     cvc->get_lsa_size = get_lsa_size;
2186     cvc->get_lsa = get_lsa;
2187     cvc->set_lsa = set_lsa;
2188     cvc->set_cacheline = set_cacheline;
2189 }
2190 
2191 static const TypeInfo ct3d_info = {
2192     .name = TYPE_CXL_TYPE3,
2193     .parent = TYPE_PCI_DEVICE,
2194     .class_size = sizeof(struct CXLType3Class),
2195     .class_init = ct3_class_init,
2196     .instance_size = sizeof(CXLType3Dev),
2197     .interfaces = (const InterfaceInfo[]) {
2198         { INTERFACE_CXL_DEVICE },
2199         { INTERFACE_PCIE_DEVICE },
2200         {}
2201     },
2202 };
2203 
2204 static void ct3d_registers(void)
2205 {
2206     type_register_static(&ct3d_info);
2207 }
2208 
2209 type_init(ct3d_registers);
2210