/*
 * CXL Type 3 (memory expander) device
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

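/* DOE data object sizes and offsets are measured in 4-byte dwords */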
#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};

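/*
 * Build the CDAT entries for one memory region: a Device Scoped Memory
 * Affinity Structure (DSMAS), four Device Scoped Latency and Bandwidth
 * Information Structures (DSLBIS, covering read/write latency and
 * read/write bandwidth) and a Device Scoped EFI Memory Type Structure
 * (DSEMTS).
 */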
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr,
                                         bool is_pmem, uint64_t dpa_base)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
        .DPA_base = dpa_base,
        .DPA_length = memory_region_size(mr),
    };

    /* For now, no memory-side cache; plausible placeholder numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /*
         * NV: Reserved - the non-volatile flag from the DSMAS is what
         * matters here.
         * V: EFI_MEMORY_SP
         */
        .EFI_memory_type_attr = is_pmem ? 2 : 1,
        .DPA_offset = 0,
        .DPA_length = memory_region_size(mr),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}

static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    CXLType3Dev *ct3d = priv;
    MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
    int dsmad_handle = 0;
    int cur_ent = 0;
    int len = 0;
    int rc, i;

    if (!ct3d->hostpmem && !ct3d->hostvmem) {
        return 0;
    }

    if (ct3d->hostvmem) {
        volatile_mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!volatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    if (ct3d->hostpmem) {
        nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!nonvolatile_mr) {
            return -EINVAL;
        }
        len += CT3_CDAT_NUM_ENTRIES;
    }

    table = g_malloc0(len * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    /* Now fill them in */
    if (volatile_mr) {
        rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, volatile_mr,
                                           false, 0);
        if (rc < 0) {
            return rc;
        }
        cur_ent = CT3_CDAT_NUM_ENTRIES;
    }

    if (nonvolatile_mr) {
        uint64_t base = volatile_mr ? memory_region_size(volatile_mr) : 0;
        rc = ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
                                           nonvolatile_mr, true, base);
        if (rc < 0) {
            goto error_cleanup;
        }
        cur_ent += CT3_CDAT_NUM_ENTRIES;
    }
    assert(len == cur_ent);

    *cdat_table = g_steal_pointer(&table);

    return len;
error_cleanup:
    for (i = 0; i < cur_ent; i++) {
        g_free(table[i]);
    }
    return rc;
}

static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}

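/*
 * Respond to a DOE CDAT Table Access request: copy out the table entry
 * named by the request's entry_handle and report the handle of the next
 * entry, or CXL_DOE_TAB_ENT_MAX once the final entry has been read.
 */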
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if the request is shorter than a table access request */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}

static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EUI, OUI and CID
 */
#define UI64_NULL ~(0ULL)

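/*
 * Build the Designated Vendor-Specific Extended Capabilities (DVSECs)
 * that expose the device's memory ranges, register locations, GPF
 * parameters and Flex Bus port state in PCIe configuration space.
 */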
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;
    uint32_t range1_size_hi, range1_size_lo,
             range1_base_hi = 0, range1_base_lo = 0,
             range2_size_hi = 0, range2_size_lo = 0,
             range2_base_hi = 0, range2_base_lo = 0;

    /*
     * Volatile memory is mapped at 0x0.
     * Persistent memory is mapped at (volatile->size).
     */
    if (ct3d->hostvmem) {
        range1_size_hi = ct3d->hostvmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostvmem->size & 0xF0000000);
        if (ct3d->hostpmem) {
            range2_size_hi = ct3d->hostpmem->size >> 32;
            range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                             (ct3d->hostpmem->size & 0xF0000000);
        }
    } else {
        range1_size_hi = ct3d->hostpmem->size >> 32;
        range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                         (ct3d->hostpmem->size & 0xF0000000);
    }

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = range1_size_hi,
        .range1_size_lo = range1_size_lo,
        .range1_base_hi = range1_base_hi,
        .range1_base_lo = range1_base_lo,
        .range2_size_hi = range2_size_hi,
        .range2_size_lo = range2_size_lo,
        .range2_base_hi = range2_base_hi,
        .range2_base_lo = range2_base_lo,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd         = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);
    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap                     = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl                    = 0x02, /* IO always enabled */
        .status                  = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* Rationale for this value unclear */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}

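/*
 * Software requests (un)commit by writing the COMMIT bit of an HDM
 * decoder's control register; the device model acknowledges by updating
 * COMMITTED and clearing ERR.
 */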
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);
    /* TODO: Sanity-check that the decoder configuration is possible */
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static void hdm_decoder_uncommit(CXLType3Dev *ct3d, int which)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    uint32_t ctrl;

    ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc);

    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ctrl = FIELD_DP32(ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);

    stl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + which * hdm_inc, ctrl);
}

static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}

static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 0;
        break;
    case A_CXL_HDM_DECODER1_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 1;
        break;
    case A_CXL_HDM_DECODER2_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 2;
        break;
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        which_hdm = 3;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If the bit written corresponds to the first error pointer
         * being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Following the behavior in PCIe r6.0, and
                 * assuming multiple header support, it is an
                 * implementation-defined choice to clear all matching
                 * records if more than one bit is set - which corresponds
                 * most closely to the behavior of hardware incapable of
                 * multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If there are no more errors, follow the recommendation
                 * of PCIe r6.0 6.2.4.2 and set the first error pointer to
                 * a status bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
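        /*
         * The correctable error status register is RW1C: writing a 1
         * clears the corresponding status bit.
         */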
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    } else if (should_uncommit) {
        hdm_decoder_uncommit(ct3d, which_hdm);
    }
}

static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);

    if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
        error_setg(errp, "at least one memdev property must be set");
        return false;
    } else if (ct3d->hostmem && ct3d->hostpmem) {
        error_setg(errp, "[memdev] cannot be used with new "
                         "[persistent-memdev] property");
        return false;
    } else if (ct3d->hostmem) {
        /* Use of hostmem property implies pmem */
        ct3d->hostpmem = ct3d->hostmem;
        ct3d->hostmem = NULL;
    }

    if (ct3d->hostpmem && !ct3d->lsa) {
        error_setg(errp, "lsa property must be set for persistent devices");
        return false;
    }

    if (ct3d->hostvmem) {
        MemoryRegion *vmr;
        char *v_name;

        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (!vmr) {
            error_setg(errp, "volatile memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(vmr, false);
        memory_region_set_enabled(vmr, true);
        host_memory_backend_set_mapped(ct3d->hostvmem, true);
        if (ds->id) {
            v_name = g_strdup_printf("cxl-type3-dpa-vmem-space:%s", ds->id);
        } else {
            v_name = g_strdup("cxl-type3-dpa-vmem-space");
        }
        address_space_init(&ct3d->hostvmem_as, vmr, v_name);
        ct3d->cxl_dstate.vmem_size = memory_region_size(vmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(vmr);
        g_free(v_name);
    }

    if (ct3d->hostpmem) {
        MemoryRegion *pmr;
        char *p_name;

        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (!pmr) {
            error_setg(errp, "persistent memdev must have backing device");
            return false;
        }
        memory_region_set_nonvolatile(pmr, true);
        memory_region_set_enabled(pmr, true);
        host_memory_backend_set_mapped(ct3d->hostpmem, true);
        if (ds->id) {
            p_name = g_strdup_printf("cxl-type3-dpa-pmem-space:%s", ds->id);
        } else {
            p_name = g_strdup("cxl-type3-dpa-pmem-space");
        }
        address_space_init(&ct3d->hostpmem_as, pmr, p_name);
        ct3d->cxl_dstate.pmem_size = memory_region_size(pmr);
        ct3d->cxl_dstate.mem_size += memory_region_size(pmr);
        g_free(p_name);
    }

    return true;
}

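/* DOE protocols supported by this device; the table is empty-terminated */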
static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};

static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 6;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate,
                                   &ct3d->cci);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);
    if (*errp) {
        goto err_free_special_ops;
    }

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }
    cxl_event_init(&ct3d->cxl_dstate, 2);

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
err_free_special_ops:
    g_free(regs->special_ops);
err_address_space_free:
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    if (ct3d->hostpmem) {
        address_space_destroy(&ct3d->hostpmem_as);
    }
    if (ct3d->hostvmem) {
        address_space_destroy(&ct3d->hostvmem_as);
    }
}

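/*
 * Translate a host physical address (HPA) to a device physical address
 * (DPA) by walking the committed HDM decoders. Each decoder's DPA skip
 * is accumulated; for the decoder whose HPA range contains the address,
 * the (power-of-2) interleave math is inverted: the low (8 + ig) bits
 * (the offset within one interleave granule) are kept, the iw bits
 * above them (which selected this device) are dropped, and the
 * remaining upper bits are shifted down by iw.
 */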
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    unsigned int hdm_count;
    uint32_t cap;
    uint64_t dpa_base = 0;
    int i;

    cap = ldl_le_p(cache_mem + R_CXL_HDM_DECODER_CAPABILITY);
    hdm_count = cxl_decoder_count_dec(FIELD_EX32(cap,
                                                 CXL_HDM_DECODER_CAPABILITY,
                                                 DECODER_COUNT));

    for (i = 0; i < hdm_count; i++) {
        uint64_t decoder_base, decoder_size, hpa_offset, skip;
        uint32_t hdm_ctrl, low, high;
        int ig, iw;

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc);
        decoder_base = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc);
        decoder_size = ((uint64_t)high << 32) | (low & 0xf0000000);

        low = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_LO +
                       i * hdm_inc);
        high = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_DPA_SKIP_HI +
                        i * hdm_inc);
        skip = ((uint64_t)high << 32) | (low & 0xf0000000);
        dpa_base += skip;

        hpa_offset = (uint64_t)host_addr - decoder_base;

        hdm_ctrl = ldl_le_p(cache_mem + R_CXL_HDM_DECODER0_CTRL + i * hdm_inc);
        iw = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IW);
        ig = FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, IG);
        if (!FIELD_EX32(hdm_ctrl, CXL_HDM_DECODER0_CTRL, COMMITTED)) {
            return false;
        }
        if (((uint64_t)host_addr < decoder_base) ||
            (hpa_offset >= decoder_size)) {
            dpa_base += decoder_size /
                cxl_interleave_ways_dec(iw, &error_fatal);
            continue;
        }

        *dpa = dpa_base +
            ((MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
             ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
              >> iw));

        return true;
    }
    return false;
}

static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
                                       hwaddr host_addr,
                                       unsigned int size,
                                       AddressSpace **as,
                                       uint64_t *dpa_offset)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return -ENODEV;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, dpa_offset)) {
        return -EINVAL;
    }

    if (*dpa_offset > ct3d->cxl_dstate.mem_size) {
        return -EINVAL;
    }

    if (vmr) {
        if (*dpa_offset < memory_region_size(vmr)) {
            *as = &ct3d->hostvmem_as;
        } else {
            *as = &ct3d->hostpmem_as;
            *dpa_offset -= memory_region_size(vmr);
        }
    } else {
        *as = &ct3d->hostpmem_as;
    }

    return 0;
}

MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_read(as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    uint64_t dpa_offset = 0;
    AddressSpace *as = NULL;
    int res;

    res = cxl_type3_hpa_to_as_and_dpa(CXL_TYPE3(d), host_addr, size,
                                      &as, &dpa_offset);
    if (res) {
        return MEMTX_ERROR;
    }

    return address_space_write(as, dpa_offset, attrs, &data, size);
}

static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_t3(ct3d);
}

static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *), /* for backward compatibility */
    DEFINE_PROP_LINK("persistent-memdev", CXLType3Dev, hostpmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("volatile-memdev", CXLType3Dev, hostvmem,
                     TYPE_MEMORY_BACKEND, HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};

static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return 0;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    if (!ct3d->lsa) {
        return;
    }

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * As with the PMEM itself, label updates will be lost if the guest
     * does not exit gracefully.
     */
}

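/*
 * Write a full cache line directly into the backing store at the given
 * DPA. This is not a guest HDM access path; it is used by internal
 * callers such as the mailbox poison-clear flow.
 */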
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
    MemoryRegion *vmr = NULL, *pmr = NULL;
    AddressSpace *as;

    if (ct3d->hostvmem) {
        vmr = host_memory_backend_get_memory(ct3d->hostvmem);
    }
    if (ct3d->hostpmem) {
        pmr = host_memory_backend_get_memory(ct3d->hostpmem);
    }

    if (!vmr && !pmr) {
        return false;
    }

    if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.mem_size) {
        return false;
    }

    if (vmr) {
        if (dpa_offset < memory_region_size(vmr)) {
            as = &ct3d->hostvmem_as;
        } else {
            as = &ct3d->hostpmem_as;
            dpa_offset -= memory_region_size(vmr);
        }
    } else {
        as = &ct3d->hostpmem_as;
    }

    address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, data,
                        CXL_CACHE_LINE_SIZE);
    return true;
}

void cxl_set_poison_list_overflowed(CXLType3Dev *ct3d)
{
    ct3d->poison_list_overflowed = true;
    ct3d->poison_list_overflow_ts =
        cxl_device_get_timestamp(&ct3d->cxl_dstate);
}

void qmp_cxl_inject_poison(const char *path, uint64_t start, uint64_t length,
                           Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    CXLPoison *p;

    if (length % 64) {
        error_setg(errp, "Poison injection must be in multiples of 64 bytes");
        return;
    }
    if (start % 64) {
        error_setg(errp, "Poison start address must be 64 byte aligned");
        return;
    }
    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    ct3d = CXL_TYPE3(obj);

    QLIST_FOREACH(p, &ct3d->poison_list, node) {
        if (((start >= p->start) && (start < p->start + p->length)) ||
            ((start + length > p->start) &&
             (start + length <= p->start + p->length))) {
            error_setg(errp,
                       "Overlap with existing poisoned region not supported");
            return;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        cxl_set_poison_list_overflowed(ct3d);
        return;
    }

    p = g_new0(CXLPoison, 1);
    p->length = length;
    p->start = start;
    /* Distinct from poison injected via the mailbox */
    p->type = CXL_POISON_TYPE_INTERNAL;

    QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
    ct3d->poison_list_cnt++;
}
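
/*
 * Usage sketch (command defined in qapi/cxl.json; the device path and
 * values shown are illustrative):
 * { "execute": "cxl-inject-poison",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "start": 4096, "length": 64 } }
 */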

/* For uncorrectable errors, include support for multiple header recording */
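/*
 * Injected errors are queued on error_list; the first entry's header is
 * exposed through the RAS header log registers and identified by the
 * FIRST_ERROR_POINTER field, which ct3d_reg_write() advances as software
 * clears status bits.
 */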
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        if (header) {
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) &
          ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}

static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
                                    const QemuUUID *uuid, uint32_t flags,
                                    uint8_t length, uint64_t timestamp)
{
    st24_le_p(&hdr->flags, flags);
    hdr->length = length;
    memcpy(&hdr->id, uuid, sizeof(hdr->id));
    stq_le_p(&hdr->timestamp, timestamp);
}

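/* Event record UUIDs as defined by the CXL specification */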
static const QemuUUID gen_media_uuid = {
    .data = UUID(0xfbcd0a77, 0xc260, 0x417f,
                 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
};

static const QemuUUID dram_uuid = {
    .data = UUID(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf,
                 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
};

static const QemuUUID memory_module_uuid = {
    .data = UUID(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86,
                 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
};

#define CXL_GMER_VALID_CHANNEL                          BIT(0)
#define CXL_GMER_VALID_RANK                             BIT(1)
#define CXL_GMER_VALID_DEVICE                           BIT(2)
#define CXL_GMER_VALID_COMPONENT                        BIT(3)

static int ct3d_qmp_cxl_event_log_enc(CxlEventLog log)
{
    switch (log) {
    case CXL_EVENT_LOG_INFORMATIONAL:
        return CXL_EVENT_TYPE_INFO;
    case CXL_EVENT_LOG_WARNING:
        return CXL_EVENT_TYPE_WARN;
    case CXL_EVENT_LOG_FAILURE:
        return CXL_EVENT_TYPE_FAIL;
    case CXL_EVENT_LOG_FATAL:
        return CXL_EVENT_TYPE_FATAL;
    /* DCD not yet supported */
    default:
        return -EINVAL;
    }
}

/* Component ID is device specific.  Define this as a string. */
void qmp_cxl_inject_general_media_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint64_t dpa,
                                        uint8_t descriptor, uint8_t type,
                                        uint8_t transaction_type,
                                        bool has_channel, uint8_t channel,
                                        bool has_rank, uint8_t rank,
                                        bool has_device, uint32_t device,
                                        const char *component_id,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventGenMedia gem;
    CXLEventRecordHdr *hdr = &gem.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&gem, 0, sizeof(gem));
    cxl_assign_event_header(hdr, &gen_media_uuid, flags, sizeof(gem),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    stq_le_p(&gem.phys_addr, dpa);
    gem.descriptor = descriptor;
    gem.type = type;
    gem.transaction_type = transaction_type;

    if (has_channel) {
        gem.channel = channel;
        valid_flags |= CXL_GMER_VALID_CHANNEL;
    }

    if (has_rank) {
        gem.rank = rank;
        valid_flags |= CXL_GMER_VALID_RANK;
    }

    if (has_device) {
        st24_le_p(gem.device, device);
        valid_flags |= CXL_GMER_VALID_DEVICE;
    }

    if (component_id) {
        strncpy((char *)gem.component_id, component_id,
                sizeof(gem.component_id) - 1);
        valid_flags |= CXL_GMER_VALID_COMPONENT;
    }

    stw_le_p(&gem.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&gem)) {
        cxl_event_irq_assert(ct3d);
    }
}
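
/*
 * Usage sketch (command defined in qapi/cxl.json; argument names follow
 * the QAPI schema and all values shown are illustrative):
 * { "execute": "cxl-inject-general-media-event",
 *   "arguments": { "path": "/machine/peripheral/cxl-mem0",
 *                  "log": "informational", "flags": 1, "dpa": 4096,
 *                  "descriptor": 3, "type": 2, "transaction-type": 192 } }
 */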

#define CXL_DRAM_VALID_CHANNEL                          BIT(0)
#define CXL_DRAM_VALID_RANK                             BIT(1)
#define CXL_DRAM_VALID_NIBBLE_MASK                      BIT(2)
#define CXL_DRAM_VALID_BANK_GROUP                       BIT(3)
#define CXL_DRAM_VALID_BANK                             BIT(4)
#define CXL_DRAM_VALID_ROW                              BIT(5)
#define CXL_DRAM_VALID_COLUMN                           BIT(6)
#define CXL_DRAM_VALID_CORRECTION_MASK                  BIT(7)

void qmp_cxl_inject_dram_event(const char *path, CxlEventLog log, uint8_t flags,
                               uint64_t dpa, uint8_t descriptor,
                               uint8_t type, uint8_t transaction_type,
                               bool has_channel, uint8_t channel,
                               bool has_rank, uint8_t rank,
                               bool has_nibble_mask, uint32_t nibble_mask,
                               bool has_bank_group, uint8_t bank_group,
                               bool has_bank, uint8_t bank,
                               bool has_row, uint32_t row,
                               bool has_column, uint16_t column,
                               bool has_correction_mask,
                               uint64List *correction_mask,
                               Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventDram dram;
    CXLEventRecordHdr *hdr = &dram.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint16_t valid_flags = 0;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&dram, 0, sizeof(dram));
    cxl_assign_event_header(hdr, &dram_uuid, flags, sizeof(dram),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    stq_le_p(&dram.phys_addr, dpa);
    dram.descriptor = descriptor;
    dram.type = type;
    dram.transaction_type = transaction_type;

    if (has_channel) {
        dram.channel = channel;
        valid_flags |= CXL_DRAM_VALID_CHANNEL;
    }

    if (has_rank) {
        dram.rank = rank;
        valid_flags |= CXL_DRAM_VALID_RANK;
    }

    if (has_nibble_mask) {
        st24_le_p(dram.nibble_mask, nibble_mask);
        valid_flags |= CXL_DRAM_VALID_NIBBLE_MASK;
    }

    if (has_bank_group) {
        dram.bank_group = bank_group;
        valid_flags |= CXL_DRAM_VALID_BANK_GROUP;
    }

    if (has_bank) {
        dram.bank = bank;
        valid_flags |= CXL_DRAM_VALID_BANK;
    }

    if (has_row) {
        st24_le_p(dram.row, row);
        valid_flags |= CXL_DRAM_VALID_ROW;
    }

    if (has_column) {
        stw_le_p(&dram.column, column);
        valid_flags |= CXL_DRAM_VALID_COLUMN;
    }

    if (has_correction_mask) {
        int count = 0;
        while (correction_mask && count < 4) {
            stq_le_p(&dram.correction_mask[count],
                     correction_mask->value);
            count++;
            correction_mask = correction_mask->next;
        }
        valid_flags |= CXL_DRAM_VALID_CORRECTION_MASK;
    }

    stw_le_p(&dram.validity_flags, valid_flags);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&dram)) {
        cxl_event_irq_assert(ct3d);
    }
}

void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
                                        uint8_t flags, uint8_t type,
                                        uint8_t health_status,
                                        uint8_t media_status,
                                        uint8_t additional_status,
                                        uint8_t life_used,
                                        int16_t temperature,
                                        uint32_t dirty_shutdown_count,
                                        uint32_t corrected_volatile_error_count,
                                        uint32_t corrected_persist_error_count,
                                        Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    CXLEventMemoryModule module;
    CXLEventRecordHdr *hdr = &module.hdr;
    CXLDeviceState *cxlds;
    CXLType3Dev *ct3d;
    uint8_t enc_log;
    int rc;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }
    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }
    ct3d = CXL_TYPE3(obj);
    cxlds = &ct3d->cxl_dstate;

    rc = ct3d_qmp_cxl_event_log_enc(log);
    if (rc < 0) {
        error_setg(errp, "Unhandled error log type");
        return;
    }
    enc_log = rc;

    memset(&module, 0, sizeof(module));
    cxl_assign_event_header(hdr, &memory_module_uuid, flags, sizeof(module),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));

    module.type = type;
    module.health_status = health_status;
    module.media_status = media_status;
    module.additional_status = additional_status;
    module.life_used = life_used;
    stw_le_p(&module.temperature, temperature);
    stl_le_p(&module.dirty_shutdown_count, dirty_shutdown_count);
    stl_le_p(&module.corrected_volatile_error_count,
             corrected_volatile_error_count);
    stl_le_p(&module.corrected_persistent_error_count,
             corrected_persist_error_count);

    if (cxl_event_insert(cxlds, enc_log, (CXLEventRecordRaw *)&module)) {
        cxl_event_irq_assert(ct3d);
    }
}

static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL Memory Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
    cvc->set_cacheline = set_cacheline;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);