xref: /qemu/hw/cxl/cxl-mailbox-utils.c (revision 21596064081e8d0c0153f68714981c7f0e040973)
1 /*
2  * CXL Utility library for mailbox interface
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include <math.h>
11 
12 #include "qemu/osdep.h"
13 #include "hw/pci/msi.h"
14 #include "hw/pci/msix.h"
15 #include "hw/cxl/cxl.h"
16 #include "hw/cxl/cxl_events.h"
17 #include "hw/cxl/cxl_mailbox.h"
18 #include "hw/pci/pci.h"
19 #include "hw/pci-bridge/cxl_upstream_port.h"
20 #include "qemu/cutils.h"
21 #include "qemu/log.h"
22 #include "qemu/units.h"
23 #include "qemu/uuid.h"
24 #include "system/hostmem.h"
25 #include "qemu/range.h"
26 
27 #define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
28 #define CXL_DC_EVENT_LOG_SIZE 8
29 #define CXL_NUM_EXTENTS_SUPPORTED 512
30 #define CXL_NUM_TAGS_SUPPORTED 0
31 #define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
32 #define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
33 #define CXL_ALERTS_UNDER_TEMP_WARN_THRESH (1 << 2)
34 #define CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH (1 << 3)
35 #define CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH (1 << 4)
36 
37 /*
38  * How to add a new command, example. The command set FOO, with cmd BAR.
39  *  1. Add the command set and cmd to the enum.
40  *     FOO    = 0x7f,
41  *          #define BAR 0
42  *  2. Implement the handler
43  *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
44  *                                   uint8_t *payload_in, size_t len_in,
45  *                                   uint8_t *payload_out, size_t *len_out,
46  *                                   CXLCCI *cci)
47  *  3. Add the command to the cxl_cmd_set[][]
48  *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
49  *
50  *
51  *  Writing the handler:
52  *    The handler is provided with the &struct cxl_cmd, the in/out payload
53  *    buffers and their lengths, and the &CXLCCI the command arrived on. The
54  *    handler is responsible for consuming the payload from payload_in and
55  *    operating upon it as necessary. It must then fill the output data into
56  *    payload_out, set *len_out accordingly, and return a valid return code.
57  *
58  *  XXX: The handler need not worry about endianness. The payload is read out of
59  *  a register interface that already deals with it.
60  */
61 
62 enum {
63     INFOSTAT    = 0x00,
64         #define IS_IDENTIFY   0x1
65         #define BACKGROUND_OPERATION_STATUS    0x2
66         #define GET_RESPONSE_MSG_LIMIT         0x3
67         #define SET_RESPONSE_MSG_LIMIT         0x4
68         #define BACKGROUND_OPERATION_ABORT     0x5
69     EVENTS      = 0x01,
70         #define GET_RECORDS   0x0
71         #define CLEAR_RECORDS   0x1
72         #define GET_INTERRUPT_POLICY   0x2
73         #define SET_INTERRUPT_POLICY   0x3
74     FIRMWARE_UPDATE = 0x02,
75         #define GET_INFO      0x0
76         #define TRANSFER      0x1
77         #define ACTIVATE      0x2
78     TIMESTAMP   = 0x03,
79         #define GET           0x0
80         #define SET           0x1
81     LOGS        = 0x04,
82         #define GET_SUPPORTED 0x0
83         #define GET_LOG       0x1
84     FEATURES    = 0x05,
85         #define GET_SUPPORTED 0x0
86         #define GET_FEATURE   0x1
87         #define SET_FEATURE   0x2
88     IDENTIFY    = 0x40,
89         #define MEMORY_DEVICE 0x0
90     CCLS        = 0x41,
91         #define GET_PARTITION_INFO     0x0
92         #define GET_LSA       0x2
93         #define SET_LSA       0x3
94     HEALTH_INFO_ALERTS = 0x42,
95         #define GET_ALERT_CONFIG 0x1
96         #define SET_ALERT_CONFIG 0x2
97     SANITIZE    = 0x44,
98         #define OVERWRITE     0x0
99         #define SECURE_ERASE  0x1
100         #define MEDIA_OPERATIONS 0x2
101     PERSISTENT_MEM = 0x45,
102         #define GET_SECURITY_STATE     0x0
103     MEDIA_AND_POISON = 0x43,
104         #define GET_POISON_LIST        0x0
105         #define INJECT_POISON          0x1
106         #define CLEAR_POISON           0x2
107         #define GET_SCAN_MEDIA_CAPABILITIES 0x3
108         #define SCAN_MEDIA             0x4
109         #define GET_SCAN_MEDIA_RESULTS 0x5
110     DCD_CONFIG  = 0x48,
111         #define GET_DC_CONFIG          0x0
112         #define GET_DYN_CAP_EXT_LIST   0x1
113         #define ADD_DYN_CAP_RSP        0x2
114         #define RELEASE_DYN_CAP        0x3
115     PHYSICAL_SWITCH = 0x51,
116         #define IDENTIFY_SWITCH_DEVICE      0x0
117         #define GET_PHYSICAL_PORT_STATE     0x1
118     TUNNEL = 0x53,
119         #define MANAGEMENT_COMMAND     0x0
120 };
121 
122 /* CCI Message Format CXL r3.1 Figure 7-19 */
123 typedef struct CXLCCIMessage {
124     uint8_t category;
125 #define CXL_CCI_CAT_REQ 0
126 #define CXL_CCI_CAT_RSP 1
127     uint8_t tag;
128     uint8_t resv1;
129     uint8_t command;
130     uint8_t command_set;
131     uint8_t pl_length[3];
132     uint16_t rc;
133     uint16_t vendor_specific;
134     uint8_t payload[];
135 } QEMU_PACKED CXLCCIMessage;
136 
137 /* This command is only defined for an MLD FM Owned LD or an MHD */
138 static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
139                                             uint8_t *payload_in,
140                                             size_t len_in,
141                                             uint8_t *payload_out,
142                                             size_t *len_out,
143                                             CXLCCI *cci)
144 {
145     PCIDevice *tunnel_target;
146     CXLCCI *target_cci;
147     struct {
148         uint8_t port_or_ld_id;
149         uint8_t target_type;
150         uint16_t size;
151         CXLCCIMessage ccimessage;
152     } QEMU_PACKED *in;
153     struct {
154         uint16_t resp_len;
155         uint8_t resv[2];
156         CXLCCIMessage ccimessage;
157     } QEMU_PACKED *out;
158     size_t pl_length, length_out;
159     bool bg_started;
160     int rc;
161 
162     if (cmd->in < sizeof(*in)) {
163         return CXL_MBOX_INVALID_INPUT;
164     }
165     in = (void *)payload_in;
166     out = (void *)payload_out;
167 
168     if (len_in < sizeof(*in)) {
169         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
170     }
171     /* Enough room for minimum sized message - no payload */
172     if (in->size < sizeof(in->ccimessage)) {
173         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
174     }
175     /* Length of input payload should be in->size + a wrapping tunnel header */
176     if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
177         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
178     }
179     if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
180         return CXL_MBOX_INVALID_INPUT;
181     }
182 
183     if (in->target_type != 0) {
184         qemu_log_mask(LOG_UNIMP,
185                       "Tunneled Command sent to non-existent FM-LD");
186         return CXL_MBOX_INVALID_INPUT;
187     }
188 
189     /*
190      * The target of a tunnel unfortunately depends on the type of CCI
191      * reading the message.
192      * If in a switch, then it's the port number.
193      * If in an MLD it is the LD number.
194      * If in an MHD the target type indicates where we are going.
195      */
196     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
197         CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
198         if (in->port_or_ld_id != 0) {
199             /* Only pretending to have one for now! */
200             return CXL_MBOX_INVALID_INPUT;
201         }
202         target_cci = &ct3d->ld0_cci;
203     } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
204         CXLUpstreamPort *usp = CXL_USP(cci->d);
205 
206         tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
207                                              in->port_or_ld_id);
208         if (!tunnel_target) {
209             return CXL_MBOX_INVALID_INPUT;
210         }
211         tunnel_target =
212             pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
213         if (!tunnel_target) {
214             return CXL_MBOX_INVALID_INPUT;
215         }
216         if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
217             CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);
218             /* Tunneled VDMs always land on FM Owned LD */
219             target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
220         } else {
221             return CXL_MBOX_INVALID_INPUT;
222         }
223     } else {
224         return CXL_MBOX_INVALID_INPUT;
225     }
226 
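    /* The tunneled CCI message carries its payload length as a 24-bit little-endian field */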
227     pl_length = in->ccimessage.pl_length[2] << 16 |
228         in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
229     rc = cxl_process_cci_message(target_cci,
230                                  in->ccimessage.command_set,
231                                  in->ccimessage.command,
232                                  pl_length, in->ccimessage.payload,
233                                  &length_out, out->ccimessage.payload,
234                                  &bg_started);
235     /* Payload should be in place. Rest of the CCI header needs filling */
236     out->resp_len = length_out + sizeof(CXLCCIMessage);
237     st24_le_p(out->ccimessage.pl_length, length_out);
238     out->ccimessage.rc = rc;
239     out->ccimessage.category = CXL_CCI_CAT_RSP;
240     out->ccimessage.command = in->ccimessage.command;
241     out->ccimessage.command_set = in->ccimessage.command_set;
242     out->ccimessage.tag = in->ccimessage.tag;
243     *len_out = length_out + sizeof(*out);
244 
245     return CXL_MBOX_SUCCESS;
246 }
247 
248 static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
249                                          uint8_t *payload_in, size_t len_in,
250                                          uint8_t *payload_out, size_t *len_out,
251                                          CXLCCI *cci)
252 {
253     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
254     CXLGetEventPayload *pl;
255     uint8_t log_type;
256     int max_recs;
257 
258     if (cmd->in < sizeof(log_type)) {
259         return CXL_MBOX_INVALID_INPUT;
260     }
261 
262     log_type = payload_in[0];
263 
264     pl = (CXLGetEventPayload *)payload_out;
265 
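    /* Return as many records as fit in the output payload, capped at the 16-bit maximum */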
266     max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
267                 CXL_EVENT_RECORD_SIZE;
268     if (max_recs > 0xFFFF) {
269         max_recs = 0xFFFF;
270     }
271 
272     return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
273 }
274 
275 static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
276                                            uint8_t *payload_in,
277                                            size_t len_in,
278                                            uint8_t *payload_out,
279                                            size_t *len_out,
280                                            CXLCCI *cci)
281 {
282     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
283     CXLClearEventPayload *pl;
284 
285     pl = (CXLClearEventPayload *)payload_in;
286 
287     if (len_in < sizeof(*pl) ||
288         len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
289         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
290     }
291 
292     *len_out = 0;
293     return cxl_event_clear_records(cxlds, pl);
294 }
295 
296 static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
297                                                   uint8_t *payload_in,
298                                                   size_t len_in,
299                                                   uint8_t *payload_out,
300                                                   size_t *len_out,
301                                                   CXLCCI *cci)
302 {
303     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
304     CXLEventInterruptPolicy *policy;
305     CXLEventLog *log;
306 
307     policy = (CXLEventInterruptPolicy *)payload_out;
308 
309     log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
310     if (log->irq_enabled) {
311         policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
312     }
313 
314     log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
315     if (log->irq_enabled) {
316         policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
317     }
318 
319     log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
320     if (log->irq_enabled) {
321         policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
322     }
323 
324     log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
325     if (log->irq_enabled) {
326         policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
327     }
328 
329     log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
330     if (log->irq_enabled) {
331         /* Dynamic Capacity borrows the same vector as info */
332         policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
333     }
334 
335     *len_out = sizeof(*policy);
336     return CXL_MBOX_SUCCESS;
337 }
338 
339 static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
340                                                   uint8_t *payload_in,
341                                                   size_t len_in,
342                                                   uint8_t *payload_out,
343                                                   size_t *len_out,
344                                                   CXLCCI *cci)
345 {
346     CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
347     CXLEventInterruptPolicy *policy;
348     CXLEventLog *log;
349 
350     if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
351         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
352     }
353 
354     policy = (CXLEventInterruptPolicy *)payload_in;
355 
356     log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
357     log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
358                         CXL_INT_MSI_MSIX;
359 
360     log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
361     log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
362                         CXL_INT_MSI_MSIX;
363 
364     log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
365     log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
366                         CXL_INT_MSI_MSIX;
367 
368     log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
369     log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
370                         CXL_INT_MSI_MSIX;
371 
372     /* DCD is optional */
373     if (len_in < sizeof(*policy)) {
374         return CXL_MBOX_SUCCESS;
375     }
376 
377     log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
378     log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
379                         CXL_INT_MSI_MSIX;
380 
381     *len_out = 0;
382     return CXL_MBOX_SUCCESS;
383 }
384 
385 /* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
386 static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
387                                         uint8_t *payload_in,
388                                         size_t len_in,
389                                         uint8_t *payload_out,
390                                         size_t *len_out,
391                                         CXLCCI *cci)
392 {
393     PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
394     struct {
395         uint16_t pcie_vid;
396         uint16_t pcie_did;
397         uint16_t pcie_subsys_vid;
398         uint16_t pcie_subsys_id;
399         uint64_t sn;
400         uint8_t max_message_size;
401         uint8_t component_type;
402     } QEMU_PACKED *is_identify;
403     QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);
404 
405     is_identify = (void *)payload_out;
406     is_identify->pcie_vid = class->vendor_id;
407     is_identify->pcie_did = class->device_id;
408     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
409         is_identify->sn = CXL_USP(cci->d)->sn;
410         /* Subsystem info not defined for a USP */
411         is_identify->pcie_subsys_vid = 0;
412         is_identify->pcie_subsys_id = 0;
413         is_identify->component_type = 0x0; /* Switch */
414     } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
415         PCIDevice *pci_dev = PCI_DEVICE(cci->d);
416 
417         is_identify->sn = CXL_TYPE3(cci->d)->sn;
418         /*
419          * We can't always use class->subsystem_vendor_id as
420          * it is not set if the defaults are used.
421          */
422         is_identify->pcie_subsys_vid =
423             pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
424         is_identify->pcie_subsys_id =
425             pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
426         is_identify->component_type = 0x3; /* Type 3 */
427     }
428 
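    /* Maximum message size is encoded as log2 of the payload size in bytes */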
429     is_identify->max_message_size = (uint8_t)log2(cci->payload_max);
430     *len_out = sizeof(*is_identify);
431     return CXL_MBOX_SUCCESS;
432 }
433 
434 /* CXL r3.1 section 8.2.9.1.3: Get Response Message Limit (Opcode 0003h) */
435 static CXLRetCode cmd_get_response_msg_limit(const struct cxl_cmd *cmd,
436                                              uint8_t *payload_in,
437                                              size_t len_in,
438                                              uint8_t *payload_out,
439                                              size_t *len_out,
440                                              CXLCCI *cci)
441 {
442     struct {
443         uint8_t rsp_limit;
444     } QEMU_PACKED *get_rsp_msg_limit = (void *)payload_out;
445     QEMU_BUILD_BUG_ON(sizeof(*get_rsp_msg_limit) != 1);
446 
447     get_rsp_msg_limit->rsp_limit = (uint8_t)log2(cci->payload_max);
448 
449     *len_out = sizeof(*get_rsp_msg_limit);
450     return CXL_MBOX_SUCCESS;
451 }
452 
453 /* CXL r3.1 section 8.2.9.1.4: Set Response Message Limit (Opcode 0004h) */
454 static CXLRetCode cmd_set_response_msg_limit(const struct cxl_cmd *cmd,
455                                              uint8_t *payload_in,
456                                              size_t len_in,
457                                              uint8_t *payload_out,
458                                              size_t *len_out,
459                                              CXLCCI *cci)
460 {
461     struct {
462         uint8_t rsp_limit;
463     } QEMU_PACKED *in = (void *)payload_in;
464     QEMU_BUILD_BUG_ON(sizeof(*in) != 1);
465     struct {
466         uint8_t rsp_limit;
467     } QEMU_PACKED *out = (void *)payload_out;
468     QEMU_BUILD_BUG_ON(sizeof(*out) != 1);
469 
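    /* Only message limits of 2^8 (256 B) to 2^10 (1 KiB) are accepted */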
470     if (in->rsp_limit < 8 || in->rsp_limit > 10) {
471         return CXL_MBOX_INVALID_INPUT;
472     }
473 
474     cci->payload_max = 1 << in->rsp_limit;
475     out->rsp_limit = in->rsp_limit;
476 
477     *len_out = sizeof(*out);
478     return CXL_MBOX_SUCCESS;
479 }
480 
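/* pci_for_each_device_under_bus() callback: set the bit for each CXL DSP's port number */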
481 static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
482                                   void *private)
483 {
484     uint8_t *bm = private;
485     if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
486         uint8_t port = PCIE_PORT(d)->port;
487         bm[port / 8] |= 1 << (port % 8);
488     }
489 }
490 
491 /* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
492 static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
493                                              uint8_t *payload_in,
494                                              size_t len_in,
495                                              uint8_t *payload_out,
496                                              size_t *len_out,
497                                              CXLCCI *cci)
498 {
499     PCIEPort *usp = PCIE_PORT(cci->d);
500     PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
501     int num_phys_ports = pcie_count_ds_ports(bus);
502 
503     struct cxl_fmapi_ident_switch_dev_resp_pl {
504         uint8_t ingress_port_id;
505         uint8_t rsvd;
506         uint8_t num_physical_ports;
507         uint8_t num_vcss;
508         uint8_t active_port_bitmask[0x20];
509         uint8_t active_vcs_bitmask[0x20];
510         uint16_t total_vppbs;
511         uint16_t bound_vppbs;
512         uint8_t num_hdm_decoders_per_usp;
513     } QEMU_PACKED *out;
514     QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);
515 
516     out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
517     *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
518         .num_physical_ports = num_phys_ports + 1, /* 1 USP */
519         .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
520         .active_vcs_bitmask[0] = 0x1,
521         .total_vppbs = num_phys_ports + 1,
522         .bound_vppbs = num_phys_ports + 1,
523         .num_hdm_decoders_per_usp = 4,
524     };
525 
526     /* Depends on the CCI type */
527     if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
528         out->ingress_port_id = PCIE_PORT(cci->intf)->port;
529     } else {
530         /* MCTP? */
531         out->ingress_port_id = 0;
532     }
533 
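    /* Mark each DSP found on the secondary bus, plus the USP itself, as an active port */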
534     pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
535                                   out->active_port_bitmask);
536     out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);
537 
538     *len_out = sizeof(*out);
539 
540     return CXL_MBOX_SUCCESS;
541 }
542 
543 /* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
544 static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
545                                               uint8_t *payload_in,
546                                               size_t len_in,
547                                               uint8_t *payload_out,
548                                               size_t *len_out,
549                                               CXLCCI *cci)
550 {
551     /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
552     struct cxl_fmapi_get_phys_port_state_req_pl {
553         uint8_t num_ports;
554         uint8_t ports[];
555     } QEMU_PACKED *in;
556 
557     /*
558      * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
559      * Format
560      */
561     struct cxl_fmapi_port_state_info_block {
562         uint8_t port_id;
563         uint8_t config_state;
564         uint8_t connected_device_cxl_version;
565         uint8_t rsv1;
566         uint8_t connected_device_type;
567         uint8_t port_cxl_version_bitmask;
568         uint8_t max_link_width;
569         uint8_t negotiated_link_width;
570         uint8_t supported_link_speeds_vector;
571         uint8_t max_link_speed;
572         uint8_t current_link_speed;
573         uint8_t ltssm_state;
574         uint8_t first_lane_num;
575         uint16_t link_state;
576         uint8_t supported_ld_count;
577     } QEMU_PACKED;
578 
579     /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
580     struct cxl_fmapi_get_phys_port_state_resp_pl {
581         uint8_t num_ports;
582         uint8_t rsv1[3];
583         struct cxl_fmapi_port_state_info_block ports[];
584     } QEMU_PACKED *out;
585     PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
586     PCIEPort *usp = PCIE_PORT(cci->d);
587     size_t pl_size;
588     int i;
589 
590     in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
591     out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;
592 
593     if (len_in < sizeof(*in)) {
594         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
595     }
596     /* Check if what was requested can fit */
597     if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
598         return CXL_MBOX_INVALID_INPUT;
599     }
600 
601     /* For success there should be a match for each requested port */
602     out->num_ports = in->num_ports;
603 
604     for (i = 0; i < in->num_ports; i++) {
605         struct cxl_fmapi_port_state_info_block *port;
606         /* First try to match on downstream port */
607         PCIDevice *port_dev;
608         uint16_t lnkcap, lnkcap2, lnksta;
609 
610         port = &out->ports[i];
611 
612         port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
613         if (port_dev) { /* DSP */
614             PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
615                 ->devices[0];
616             port->config_state = 3;
617             if (ds_dev) {
618                 if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
619                     port->connected_device_type = 5; /* Assume MLD for now */
620                 } else {
621                     port->connected_device_type = 1;
622                 }
623             } else {
624                 port->connected_device_type = 0;
625             }
626             port->supported_ld_count = 3;
627         } else if (usp->port == in->ports[i]) { /* USP */
628             port_dev = PCI_DEVICE(usp);
629             port->config_state = 4;
630             port->connected_device_type = 0;
631         } else {
632             return CXL_MBOX_INVALID_INPUT;
633         }
634 
635         port->port_id = in->ports[i];
636         /* Information on status of this port in lnksta, lnkcap */
637         if (!port_dev->exp.exp_cap) {
638             return CXL_MBOX_INTERNAL_ERROR;
639         }
640         lnksta = port_dev->config_read(port_dev,
641                                        port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
642                                        sizeof(lnksta));
643         lnkcap = port_dev->config_read(port_dev,
644                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
645                                        sizeof(lnkcap));
646         lnkcap2 = port_dev->config_read(port_dev,
647                                         port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
648                                         sizeof(lnkcap2));
649 
650         port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
651         port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
652         /* No definition for SLS field in linux/pci_regs.h */
653         port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
654         port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
655         port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
656         /* TODO: Track down if we can get the rest of the info */
657         port->ltssm_state = 0x7;
658         port->first_lane_num = 0;
659         port->link_state = 0;
660         port->port_cxl_version_bitmask = 0x2;
661         port->connected_device_cxl_version = 0x2;
662     }
663 
664     pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
665     *len_out = pl_size;
666 
667     return CXL_MBOX_SUCCESS;
668 }
669 
670 /* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
671 static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
672                                          uint8_t *payload_in,
673                                          size_t len_in,
674                                          uint8_t *payload_out,
675                                          size_t *len_out,
676                                          CXLCCI *cci)
677 {
678     struct {
679         uint8_t status;
680         uint8_t rsvd;
681         uint16_t opcode;
682         uint16_t returncode;
683         uint16_t vendor_ext_status;
684     } QEMU_PACKED *bg_op_status;
685     QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);
686 
687     bg_op_status = (void *)payload_out;
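    /* Bits[7:1] carry the percentage complete; bit[0] indicates an operation is still running */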
688     bg_op_status->status = cci->bg.complete_pct << 1;
689     if (cci->bg.runtime > 0) {
690         bg_op_status->status |= 1U << 0;
691     }
692     bg_op_status->opcode = cci->bg.opcode;
693     bg_op_status->returncode = cci->bg.ret_code;
694     *len_out = sizeof(*bg_op_status);
695 
696     return CXL_MBOX_SUCCESS;
697 }
698 
699 /*
700  * CXL r3.1 Section 8.2.9.1.5:
701  * Request Abort Background Operation (Opcode 0005h)
702  */
703 static CXLRetCode cmd_infostat_bg_op_abort(const struct cxl_cmd *cmd,
704                                            uint8_t *payload_in,
705                                            size_t len_in,
706                                            uint8_t *payload_out,
707                                            size_t *len_out,
708                                            CXLCCI *cci)
709 {
710     int bg_set = cci->bg.opcode >> 8;
711     int bg_cmd = cci->bg.opcode & 0xff;
712     const struct cxl_cmd *bg_c = &cci->cxl_cmd_set[bg_set][bg_cmd];
713 
714     if (!(bg_c->effect & CXL_MBOX_BACKGROUND_OPERATION_ABORT)) {
715         return CXL_MBOX_REQUEST_ABORT_NOTSUP;
716     }
717 
718     qemu_mutex_lock(&cci->bg.lock);
719     if (cci->bg.runtime) {
720         /* If the operation is nearly complete, let it finish rather than aborting */
721         if (cci->bg.complete_pct < 85) {
722             timer_del(cci->bg.timer);
723             cci->bg.ret_code = CXL_MBOX_ABORTED;
724             cci->bg.starttime = 0;
725             cci->bg.runtime = 0;
726             cci->bg.aborted = true;
727         }
728     }
729     qemu_mutex_unlock(&cci->bg.lock);
730 
731     return CXL_MBOX_SUCCESS;
732 }
733 
734 #define CXL_FW_SLOTS 2
735 #define CXL_FW_SIZE  0x02000000 /* 32 MiB */
736 
737 /* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
738 static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
739                                                uint8_t *payload_in,
740                                                size_t len,
741                                                uint8_t *payload_out,
742                                                size_t *len_out,
743                                                CXLCCI *cci)
744 {
745     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
746     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
747     struct {
748         uint8_t slots_supported;
749         uint8_t slot_info;
750         uint8_t caps;
751         uint8_t rsvd[0xd];
752         char fw_rev1[0x10];
753         char fw_rev2[0x10];
754         char fw_rev3[0x10];
755         char fw_rev4[0x10];
756     } QEMU_PACKED *fw_info;
757     QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
758 
759     if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
760         !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
761         !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
762         return CXL_MBOX_INTERNAL_ERROR;
763     }
764 
765     fw_info = (void *)payload_out;
766 
767     fw_info->slots_supported = CXL_FW_SLOTS;
768     fw_info->slot_info = (cci->fw.active_slot & 0x7) |
769             ((cci->fw.staged_slot & 0x7) << 3);
770     fw_info->caps = BIT(0);  /* online update supported */
771 
772     if (cci->fw.slot[0]) {
773         pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
774     }
775     if (cci->fw.slot[1]) {
776         pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
777     }
778 
779     *len_out = sizeof(*fw_info);
780     return CXL_MBOX_SUCCESS;
781 }
782 
783 /* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
784 #define CXL_FW_XFER_ALIGNMENT   128
785 
786 #define CXL_FW_XFER_ACTION_FULL     0x0
787 #define CXL_FW_XFER_ACTION_INIT     0x1
788 #define CXL_FW_XFER_ACTION_CONTINUE 0x2
789 #define CXL_FW_XFER_ACTION_END      0x3
790 #define CXL_FW_XFER_ACTION_ABORT    0x4
791 
792 static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
793                                                uint8_t *payload_in,
794                                                size_t len,
795                                                uint8_t *payload_out,
796                                                size_t *len_out,
797                                                CXLCCI *cci)
798 {
799     struct {
800         uint8_t action;
801         uint8_t slot;
802         uint8_t rsvd1[2];
803         uint32_t offset;
804         uint8_t rsvd2[0x78];
805         uint8_t data[];
806     } QEMU_PACKED *fw_transfer = (void *)payload_in;
807     size_t offset, length;
808 
809     if (len < sizeof(*fw_transfer)) {
810         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
811     }
812 
813     if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
814         /*
815          * At this point there aren't any ongoing transfers
816          * running in the bg - everything is serialized before
817          * this call. Just reset the state machine and
818          * disregard any other input.
819          */
820         cci->fw.transferring = false;
821         return CXL_MBOX_SUCCESS;
822     }
823 
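    /* The transfer offset is expressed in units of CXL_FW_XFER_ALIGNMENT (128 bytes) */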
824     offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
825     length = len - sizeof(*fw_transfer);
826     if (offset + length > CXL_FW_SIZE) {
827         return CXL_MBOX_INVALID_INPUT;
828     }
829 
830     if (cci->fw.transferring) {
831         if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
832             fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
833             return CXL_MBOX_FW_XFER_IN_PROGRESS;
834         }
835         /*
836          * Abort partitioned package transfer if over 30 secs
837          * between parts. As opposed to the explicit ABORT action,
838          * semantically treat this condition as an error - as
839          * if a part action were passed without a previous INIT.
840          */
841         if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
842             cci->fw.transferring = false;
843             return CXL_MBOX_INVALID_INPUT;
844         }
845     } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
846                fw_transfer->action == CXL_FW_XFER_ACTION_END) {
847         return CXL_MBOX_INVALID_INPUT;
848     }
849 
850     /* allow back-to-back retransmission */
851     if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
852         (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
853          fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
854         /* verify no overlaps */
855         if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
856             return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
857         }
858     }
859 
860     switch (fw_transfer->action) {
861     case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
862     case CXL_FW_XFER_ACTION_END:
863         if (fw_transfer->slot == 0 ||
864             fw_transfer->slot == cci->fw.active_slot ||
865             fw_transfer->slot > CXL_FW_SLOTS) {
866             return CXL_MBOX_FW_INVALID_SLOT;
867         }
868 
869         /* mark the slot used upon bg completion */
870         break;
871     case CXL_FW_XFER_ACTION_INIT:
872         if (offset != 0) {
873             return CXL_MBOX_INVALID_INPUT;
874         }
875 
876         cci->fw.transferring = true;
877         cci->fw.prev_offset = offset;
878         cci->fw.prev_len = length;
879         break;
880     case CXL_FW_XFER_ACTION_CONTINUE:
881         cci->fw.prev_offset = offset;
882         cci->fw.prev_len = length;
883         break;
884     default:
885         return CXL_MBOX_INVALID_INPUT;
886     }
887 
888     if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
889         cci->bg.runtime = 10 * 1000UL;
890     } else {
891         cci->bg.runtime = 2 * 1000UL;
892     }
893     /* keep relevant context for bg completion */
894     cci->fw.curr_action = fw_transfer->action;
895     cci->fw.curr_slot = fw_transfer->slot;
896     *len_out = 0;
897 
898     return CXL_MBOX_BG_STARTED;
899 }
900 
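/*
 * Apply the effects of a firmware transfer once it completes: FULL/END mark
 * the target slot as populated and end the transfer; INIT/CONTINUE record
 * the time of the last partial transfer.
 */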
901 static void __do_firmware_xfer(CXLCCI *cci)
902 {
903     switch (cci->fw.curr_action) {
904     case CXL_FW_XFER_ACTION_FULL:
905     case CXL_FW_XFER_ACTION_END:
906         cci->fw.slot[cci->fw.curr_slot - 1] = true;
907         cci->fw.transferring = false;
908         break;
909     case CXL_FW_XFER_ACTION_INIT:
910     case CXL_FW_XFER_ACTION_CONTINUE:
911         time(&cci->fw.last_partxfer);
912         break;
913     default:
914         break;
915     }
916 }
917 
918 /* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
919 static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
920                                                uint8_t *payload_in,
921                                                size_t len,
922                                                uint8_t *payload_out,
923                                                size_t *len_out,
924                                                CXLCCI *cci)
925 {
926     struct {
927         uint8_t action;
928         uint8_t slot;
929     } QEMU_PACKED *fw_activate = (void *)payload_in;
930     QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);
931 
932     if (fw_activate->slot == 0 ||
933         fw_activate->slot == cci->fw.active_slot ||
934         fw_activate->slot > CXL_FW_SLOTS) {
935         return CXL_MBOX_FW_INVALID_SLOT;
936     }
937 
938     /* ensure that an actual fw package is there */
939     if (!cci->fw.slot[fw_activate->slot - 1]) {
940         return CXL_MBOX_FW_INVALID_SLOT;
941     }
942 
943     switch (fw_activate->action) {
944     case 0: /* online */
945         cci->fw.active_slot = fw_activate->slot;
946         break;
947     case 1: /* reset */
948         cci->fw.staged_slot = fw_activate->slot;
949         break;
950     default:
951         return CXL_MBOX_INVALID_INPUT;
952     }
953 
954     return CXL_MBOX_SUCCESS;
955 }
956 
957 /* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
958 static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
959                                     uint8_t *payload_in,
960                                     size_t len_in,
961                                     uint8_t *payload_out,
962                                     size_t *len_out,
963                                     CXLCCI *cci)
964 {
965     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
966     uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);
967 
968     stq_le_p(payload_out, final_time);
969     *len_out = 8;
970 
971     return CXL_MBOX_SUCCESS;
972 }
973 
974 /* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
975 static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
976                                     uint8_t *payload_in,
977                                     size_t len_in,
978                                     uint8_t *payload_out,
979                                     size_t *len_out,
980                                     CXLCCI *cci)
981 {
982     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
983 
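    /* Record the host-supplied time and the virtual clock value at which it was set */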
984     cxl_dstate->timestamp.set = true;
985     cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
986 
987     cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);
988 
989     *len_out = 0;
990     return CXL_MBOX_SUCCESS;
991 }
992 
993 /* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
994 static const QemuUUID cel_uuid = {
995     .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
996                  0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
997 };
998 
999 /* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
1000 static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
1001                                          uint8_t *payload_in,
1002                                          size_t len_in,
1003                                          uint8_t *payload_out,
1004                                          size_t *len_out,
1005                                          CXLCCI *cci)
1006 {
1007     struct {
1008         uint16_t entries;
1009         uint8_t rsvd[6];
1010         struct {
1011             QemuUUID uuid;
1012             uint32_t size;
1013         } log_entries[1];
1014     } QEMU_PACKED *supported_logs = (void *)payload_out;
1015     QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);
1016 
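    /* Only the Command Effects Log (CEL) is advertised; each CEL entry is 4 bytes */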
1017     supported_logs->entries = 1;
1018     supported_logs->log_entries[0].uuid = cel_uuid;
1019     supported_logs->log_entries[0].size = 4 * cci->cel_size;
1020 
1021     *len_out = sizeof(*supported_logs);
1022     return CXL_MBOX_SUCCESS;
1023 }
1024 
1025 /* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
1026 static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
1027                                    uint8_t *payload_in,
1028                                    size_t len_in,
1029                                    uint8_t *payload_out,
1030                                    size_t *len_out,
1031                                    CXLCCI *cci)
1032 {
1033     struct {
1034         QemuUUID uuid;
1035         uint32_t offset;
1036         uint32_t length;
1037     } QEMU_PACKED QEMU_ALIGNED(16) *get_log;
1038 
1039     get_log = (void *)payload_in;
1040 
1041     if (get_log->length > cci->payload_max) {
1042         return CXL_MBOX_INVALID_INPUT;
1043     }
1044 
1045     if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
1046         return CXL_MBOX_INVALID_LOG;
1047     }
1048 
1049     /*
1050      * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
1051      *   The device shall return Invalid Input if the Offset or Length
1052      *   fields attempt to access beyond the size of the log as reported by Get
1053      *   Supported Log.
1054  *   Supported Logs.
1055      * Only valid for there to be one entry per opcode, but the length + offset
1056  * Only one entry per opcode is valid, but the length + offset may still be
1057  * greater than that if the inputs are not valid, and so would access beyond
1058  * the end of cci->cel_log.
1059     if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
1060         return CXL_MBOX_INVALID_INPUT;
1061     }
1062 
1063     /* Save what we need from the input before the copy may overwrite the payload */
1064     *len_out = get_log->length;
1065 
1066     memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);
1067 
1068     return CXL_MBOX_SUCCESS;
1069 }
1070 
1071 /* CXL r3.1 section 8.2.9.6: Features */
1072 /*
1073  * Get Supported Features output payload
1074  * CXL r3.1 section 8.2.9.6.1 Table 8-96
1075  */
1076 typedef struct CXLSupportedFeatureHeader {
1077     uint16_t entries;
1078     uint16_t nsuppfeats_dev;
1079     uint32_t reserved;
1080 } QEMU_PACKED CXLSupportedFeatureHeader;
1081 
1082 /*
1083  * Get Supported Features Supported Feature Entry
1084  * CXL r3.1 section 8.2.9.6.1 Table 8-97
1085  */
1086 typedef struct CXLSupportedFeatureEntry {
1087     QemuUUID uuid;
1088     uint16_t feat_index;
1089     uint16_t get_feat_size;
1090     uint16_t set_feat_size;
1091     uint32_t attr_flags;
1092     uint8_t get_feat_version;
1093     uint8_t set_feat_version;
1094     uint16_t set_feat_effects;
1095     uint8_t rsvd[18];
1096 } QEMU_PACKED CXLSupportedFeatureEntry;
1097 
1102 /* Supported Feature Entry : attribute flags */
1103 #define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
1104 #define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
1105 #define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
1106 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
1107 #define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)
1108 
1109 /* Supported Feature Entry : set feature effects */
1110 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
1111 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
1112 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
1113 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
1114 #define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
1115 #define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
1116 #define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
1117 #define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
1118 #define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
1119 #define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
1120 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
1121 #define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)
1122 
1123 enum CXL_SUPPORTED_FEATURES_LIST {
1124     CXL_FEATURE_PATROL_SCRUB = 0,
1125     CXL_FEATURE_ECS,
1126     CXL_FEATURE_MAX
1127 };
1128 
1129 /* CXL r3.1 section 8.2.9.6.2: Get Feature */
1130 /*
1131  * Get Feature input payload
1132  * CXL r3.1 section 8.2.9.6.2 Table 8-99
1133  */
1134 /* Get Feature : Payload in selection */
1135 enum CXL_GET_FEATURE_SELECTION {
1136     CXL_GET_FEATURE_SEL_CURRENT_VALUE,
1137     CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
1138     CXL_GET_FEATURE_SEL_SAVED_VALUE,
1139     CXL_GET_FEATURE_SEL_MAX
1140 };
1141 
1142 /* CXL r3.1 section 8.2.9.6.3: Set Feature */
1143 /*
1144  * Set Feature input payload
1145  * CXL r3.1 section 8.2.9.6.3 Table 8-101
1146  */
1147 typedef struct CXLSetFeatureInHeader {
1148         QemuUUID uuid;
1149         uint32_t flags;
1150         uint16_t offset;
1151         uint8_t version;
1152         uint8_t rsvd[9];
1153 } QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;
1154 
1155 /* Set Feature : Payload in flags */
1156 #define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK   0x7
1157 enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
1158     CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
1159     CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
1160     CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
1161     CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
1162     CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
1163     CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
1164 };
1165 #define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
1166 
1167 /* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
1168 static const QemuUUID patrol_scrub_uuid = {
1169     .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
1170                  0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
1171 };
1172 
1173 typedef struct CXLMemPatrolScrubSetFeature {
1174         CXLSetFeatureInHeader hdr;
1175         CXLMemPatrolScrubWriteAttrs feat_data;
1176 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;
1177 
1178 /*
1179  * CXL r3.1 section 8.2.9.9.11.2:
1180  * DDR5 Error Check Scrub (ECS) Control Feature
1181  */
1182 static const QemuUUID ecs_uuid = {
1183     .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
1184                  0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
1185 };
1186 
1187 typedef struct CXLMemECSSetFeature {
1188         CXLSetFeatureInHeader hdr;
1189         CXLMemECSWriteAttrs feat_data[];
1190 } QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
1191 
1192 /* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
1193 static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
1194                                              uint8_t *payload_in,
1195                                              size_t len_in,
1196                                              uint8_t *payload_out,
1197                                              size_t *len_out,
1198                                              CXLCCI *cci)
1199 {
1200     struct {
1201         uint32_t count;
1202         uint16_t start_index;
1203         uint16_t reserved;
1204     } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_in = (void *)payload_in;
1205 
1206     struct {
1207         CXLSupportedFeatureHeader hdr;
1208         CXLSupportedFeatureEntry feat_entries[];
1209     } QEMU_PACKED QEMU_ALIGNED(16) * get_feats_out = (void *)payload_out;
1210     uint16_t index, req_entries;
1211     uint16_t entry;
1212 
1213     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1214         return CXL_MBOX_UNSUPPORTED;
1215     }
1216     if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
1217         get_feats_in->start_index >= CXL_FEATURE_MAX) {
1218         return CXL_MBOX_INVALID_INPUT;
1219     }
1220 
1221     req_entries = (get_feats_in->count -
1222                    sizeof(CXLSupportedFeatureHeader)) /
1223                    sizeof(CXLSupportedFeatureEntry);
1224     req_entries = MIN(req_entries,
1225                       (CXL_FEATURE_MAX - get_feats_in->start_index));
1226 
1227     for (entry = 0, index = get_feats_in->start_index;
1228          entry < req_entries; index++) {
1229         switch (index) {
1230         case  CXL_FEATURE_PATROL_SCRUB:
1231             /* Fill supported feature entry for device patrol scrub control */
1232             get_feats_out->feat_entries[entry++] =
1233                            (struct CXLSupportedFeatureEntry) {
1234                 .uuid = patrol_scrub_uuid,
1235                 .feat_index = index,
1236                 .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
1237                 .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
1238                 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
1239                 .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
1240                 .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
1241                 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
1242                                     CXL_FEAT_ENTRY_SFE_CEL_VALID,
1243             };
1244             break;
1245         case  CXL_FEATURE_ECS:
1246             /* Fill supported feature entry for device DDR5 ECS control */
1247             get_feats_out->feat_entries[entry++] =
1248                          (struct CXLSupportedFeatureEntry) {
1249                 .uuid = ecs_uuid,
1250                 .feat_index = index,
1251                 .get_feat_size = sizeof(CXLMemECSReadAttrs),
1252                 .set_feat_size = sizeof(CXLMemECSWriteAttrs),
1253                 .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
1254                 .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
1255                 .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
1256                 .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
1257                                     CXL_FEAT_ENTRY_SFE_CEL_VALID,
1258             };
1259             break;
1260         default:
1261             __builtin_unreachable();
1262         }
1263     }
1264     get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
1265     get_feats_out->hdr.entries = req_entries;
1266     *len_out = sizeof(CXLSupportedFeatureHeader) +
1267                       req_entries * sizeof(CXLSupportedFeatureEntry);
1268 
1269     return CXL_MBOX_SUCCESS;
1270 }
1271 
1272 /* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
1273 static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
1274                                            uint8_t *payload_in,
1275                                            size_t len_in,
1276                                            uint8_t *payload_out,
1277                                            size_t *len_out,
1278                                            CXLCCI *cci)
1279 {
1280     struct {
1281         QemuUUID uuid;
1282         uint16_t offset;
1283         uint16_t count;
1284         uint8_t selection;
1285     } QEMU_PACKED QEMU_ALIGNED(16) * get_feature;
1286     uint16_t bytes_to_copy = 0;
1287     CXLType3Dev *ct3d;
1288     CXLSetFeatureInfo *set_feat_info;
1289 
1290     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1291         return CXL_MBOX_UNSUPPORTED;
1292     }
1293 
1294     ct3d = CXL_TYPE3(cci->d);
1295     get_feature = (void *)payload_in;
1296 
1297     set_feat_info = &ct3d->set_feat_info;
1298     if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
1299         return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
1300     }
1301 
1302     if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
1303         return CXL_MBOX_UNSUPPORTED;
1304     }
1305     if (get_feature->offset + get_feature->count > cci->payload_max) {
1306         return CXL_MBOX_INVALID_INPUT;
1307     }
1308 
1309     if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
1310         if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
1311             return CXL_MBOX_INVALID_INPUT;
1312         }
1313         bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
1314                                              get_feature->offset;
1315         bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
1316         memcpy(payload_out,
1317                (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
1318                bytes_to_copy);
1319     } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
1320         if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
1321             return CXL_MBOX_INVALID_INPUT;
1322         }
1323         bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
1324         bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
1325         memcpy(payload_out,
1326                (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
1327                bytes_to_copy);
1328     } else {
1329         return CXL_MBOX_UNSUPPORTED;
1330     }
1331 
1332     *len_out = bytes_to_copy;
1333 
1334     return CXL_MBOX_SUCCESS;
1335 }
1336 
1337 /* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
1338 static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
1339                                            uint8_t *payload_in,
1340                                            size_t len_in,
1341                                            uint8_t *payload_out,
1342                                            size_t *len_out,
1343                                            CXLCCI *cci)
1344 {
1345     CXLSetFeatureInHeader *hdr = (void *)payload_in;
1346     CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
1347     CXLMemPatrolScrubSetFeature *ps_set_feature;
1348     CXLMemECSWriteAttrs *ecs_write_attrs;
1349     CXLMemECSSetFeature *ecs_set_feature;
1350     CXLSetFeatureInfo *set_feat_info;
1351     uint16_t bytes_to_copy = 0;
1352     uint8_t data_transfer_flag;
1353     CXLType3Dev *ct3d;
1354     uint16_t count;
1355 
1356     if (len_in < sizeof(*hdr)) {
1357         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1358     }
1359 
1360     if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
1361         return CXL_MBOX_UNSUPPORTED;
1362     }
1363     ct3d = CXL_TYPE3(cci->d);
1364     set_feat_info = &ct3d->set_feat_info;
1365 
1366     if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
1367         !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
1368         return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
1369     }
1370     if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
1371         set_feat_info->data_saved_across_reset = true;
1372     } else {
1373         set_feat_info->data_saved_across_reset = false;
1374     }
1375 
1376     data_transfer_flag =
1377               hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
1378     if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
1379         set_feat_info->uuid = hdr->uuid;
1380         set_feat_info->data_size = 0;
1381     }
1382     set_feat_info->data_transfer_flag = data_transfer_flag;
1383     set_feat_info->data_offset = hdr->offset;
1384     bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);
1385 
1386     if (bytes_to_copy == 0) {
1387         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1388     }
1389 
1390     if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
1391         if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
1392             return CXL_MBOX_UNSUPPORTED;
1393         }
1394 
1395         ps_set_feature = (void *)payload_in;
1396         ps_write_attrs = &ps_set_feature->feat_data;
1397 
1398         if ((uint32_t)hdr->offset + bytes_to_copy >
1399             sizeof(ct3d->patrol_scrub_wr_attrs)) {
1400             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1401         }
1402         memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
1403                ps_write_attrs,
1404                bytes_to_copy);
1405         set_feat_info->data_size += bytes_to_copy;
1406 
1407         if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1408             data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
1409             ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
1410             ct3d->patrol_scrub_attrs.scrub_cycle |=
1411                           ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
1412             ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
1413             ct3d->patrol_scrub_attrs.scrub_flags |=
1414                           ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
1415         }
1416     } else if (qemu_uuid_is_equal(&hdr->uuid,
1417                                   &ecs_uuid)) {
1418         if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
1419             return CXL_MBOX_UNSUPPORTED;
1420         }
1421 
1422         ecs_set_feature = (void *)payload_in;
1423         ecs_write_attrs = ecs_set_feature->feat_data;
1424 
1425         if ((uint32_t)hdr->offset + bytes_to_copy >
1426             sizeof(ct3d->ecs_wr_attrs)) {
1427             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1428         }
1429         memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
1430                ecs_write_attrs,
1431                bytes_to_copy);
1432         set_feat_info->data_size += bytes_to_copy;
1433 
1434         if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1435             data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
1436             ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
1437             for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
1438                 ct3d->ecs_attrs.fru_attrs[count].ecs_config =
1439                         ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
1440             }
1441         }
1442     } else {
1443         return CXL_MBOX_UNSUPPORTED;
1444     }
1445 
1446     if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
1447         data_transfer_flag ==  CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
1448         data_transfer_flag ==  CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
1449         memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
1450         if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
1451             memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
1452         } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
1453             memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
1454         }
1455         set_feat_info->data_transfer_flag = 0;
1456         set_feat_info->data_saved_across_reset = false;
1457         set_feat_info->data_offset = 0;
1458         set_feat_info->data_size = 0;
1459     }
1460 
1461     return CXL_MBOX_SUCCESS;
1462 }
1463 
1464 /* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
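/*
 * Capacity fields in the Identify output are encoded as multiples of
 * CXL_CAPACITY_MULTIPLIER (256 MiB), hence the alignment checks below.
 */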
1465 static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
1466                                              uint8_t *payload_in,
1467                                              size_t len_in,
1468                                              uint8_t *payload_out,
1469                                              size_t *len_out,
1470                                              CXLCCI *cci)
1471 {
1472     struct {
1473         char fw_revision[0x10];
1474         uint64_t total_capacity;
1475         uint64_t volatile_capacity;
1476         uint64_t persistent_capacity;
1477         uint64_t partition_align;
1478         uint16_t info_event_log_size;
1479         uint16_t warning_event_log_size;
1480         uint16_t failure_event_log_size;
1481         uint16_t fatal_event_log_size;
1482         uint32_t lsa_size;
1483         uint8_t poison_list_max_mer[3];
1484         uint16_t inject_poison_limit;
1485         uint8_t poison_caps;
1486         uint8_t qos_telemetry_caps;
1487         uint16_t dc_event_log_size;
1488     } QEMU_PACKED *id;
1489     QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
1490     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1491     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1492     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
1493 
1494     if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1495         (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1496         (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
1497         return CXL_MBOX_INTERNAL_ERROR;
1498     }
1499 
1500     id = (void *)payload_out;
1501 
1502     snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);
1503 
1504     stq_le_p(&id->total_capacity,
1505              cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
1506     stq_le_p(&id->persistent_capacity,
1507              cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
1508     stq_le_p(&id->volatile_capacity,
1509              cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
1510     stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
1511     /* 256 poison records */
1512     st24_le_p(id->poison_list_max_mer, 256);
1513     /* No limit - so limited by main poison record limit */
1514     stw_le_p(&id->inject_poison_limit, 0);
1515     stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);
1516 
1517     *len_out = sizeof(*id);
1518     return CXL_MBOX_SUCCESS;
1519 }
1520 
1521 /* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
1522 static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
1523                                               uint8_t *payload_in,
1524                                               size_t len_in,
1525                                               uint8_t *payload_out,
1526                                               size_t *len_out,
1527                                               CXLCCI *cci)
1528 {
1529     CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
1530     struct {
1531         uint64_t active_vmem;
1532         uint64_t active_pmem;
1533         uint64_t next_vmem;
1534         uint64_t next_pmem;
1535     } QEMU_PACKED *part_info = (void *)payload_out;
1536     QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
1537     CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
1538 
1539     if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1540         (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
1541         (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
1542         return CXL_MBOX_INTERNAL_ERROR;
1543     }
1544 
1545     stq_le_p(&part_info->active_vmem,
1546              cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
1547     /*
1548      * When both next_vmem and next_pmem are 0, there is no pending change to
1549      * partitioning.
1550      */
1551     stq_le_p(&part_info->next_vmem, 0);
1552     stq_le_p(&part_info->active_pmem,
1553              cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
1554     stq_le_p(&part_info->next_pmem, 0);
1555 
1556     *len_out = sizeof(*part_info);
1557     return CXL_MBOX_SUCCESS;
1558 }
1559 
1560 /* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
1561 static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
1562                                    uint8_t *payload_in,
1563                                    size_t len_in,
1564                                    uint8_t *payload_out,
1565                                    size_t *len_out,
1566                                    CXLCCI *cci)
1567 {
1568     struct {
1569         uint32_t offset;
1570         uint32_t length;
1571     } QEMU_PACKED *get_lsa;
1572     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1573     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1574     uint64_t offset, length;
1575 
1576     get_lsa = (void *)payload_in;
1577     offset = get_lsa->offset;
1578     length = get_lsa->length;
1579 
1580     if (offset + length > cvc->get_lsa_size(ct3d)) {
1581         *len_out = 0;
1582         return CXL_MBOX_INVALID_INPUT;
1583     }
1584 
1585     *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
1586     return CXL_MBOX_SUCCESS;
1587 }
1588 
1589 /* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
1590 static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
1591                                    uint8_t *payload_in,
1592                                    size_t len_in,
1593                                    uint8_t *payload_out,
1594                                    size_t *len_out,
1595                                    CXLCCI *cci)
1596 {
1597     struct set_lsa_pl {
1598         uint32_t offset;
1599         uint32_t rsvd;
1600         uint8_t data[];
1601     } QEMU_PACKED;
1602     struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
1603     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1604     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
1605     const size_t hdr_len = offsetof(struct set_lsa_pl, data);
1606 
1607     *len_out = 0;
1608     if (len_in < hdr_len) {
1609         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1610     }
1611 
1612     if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
1613         return CXL_MBOX_INVALID_INPUT;
1614     }
1615     len_in -= hdr_len;
1616 
1617     cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
1618     return CXL_MBOX_SUCCESS;
1619 }
1620 
1621 /* CXL r3.2 Section 8.2.10.9.3.2 Get Alert Configuration (Opcode 4201h) */
1622 static CXLRetCode cmd_get_alert_config(const struct cxl_cmd *cmd,
1623                                        uint8_t *payload_in,
1624                                        size_t len_in,
1625                                        uint8_t *payload_out,
1626                                        size_t *len_out,
1627                                        CXLCCI *cci)
1628 {
1629     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1630     CXLAlertConfig *out = (CXLAlertConfig *)payload_out;
1631 
1632     memcpy(out, &ct3d->alert_config, sizeof(ct3d->alert_config));
1633     *len_out = sizeof(ct3d->alert_config);
1634 
1635     return CXL_MBOX_SUCCESS;
1636 }
1637 
1638 /* CXL r3.2 Section 8.2.10.9.3.3 Set Alert Configuration (Opcode 4202h) */
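/*
 * Each warning threshold is accepted only when its valid bit is set. Per
 * CXL r3.2 Table 8-149, the life-used and temperature warning thresholds are
 * checked against the corresponding critical alert thresholds before being
 * applied, and the matching enable bit is set in alert_config.
 */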
1639 static CXLRetCode cmd_set_alert_config(const struct cxl_cmd *cmd,
1640                                        uint8_t *payload_in,
1641                                        size_t len_in,
1642                                        uint8_t *payload_out,
1643                                        size_t *len_out,
1644                                        CXLCCI *cci)
1645 {
1646     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1647     CXLAlertConfig *alert_config = &ct3d->alert_config;
1648     struct {
1649         uint8_t valid_alert_actions;
1650         uint8_t enable_alert_actions;
1651         uint8_t life_used_warn_thresh;
1652         uint8_t rsvd;
1653         uint16_t over_temp_warn_thresh;
1654         uint16_t under_temp_warn_thresh;
1655         uint16_t cor_vmem_err_warn_thresh;
1656         uint16_t cor_pmem_err_warn_thresh;
1657     } QEMU_PACKED *in = (void *)payload_in;
1658 
1659     if (in->valid_alert_actions & CXL_ALERTS_LIFE_USED_WARN_THRESH) {
1660         /*
1661          * CXL r3.2 Table 8-149 The life used warning threshold shall be
1662          * less than the life used critical alert value.
1663          */
1664         if (in->life_used_warn_thresh >=
1665             alert_config->life_used_crit_alert_thresh) {
1666             return CXL_MBOX_INVALID_INPUT;
1667         }
1668         alert_config->life_used_warn_thresh = in->life_used_warn_thresh;
1669         alert_config->enable_alerts |= CXL_ALERTS_LIFE_USED_WARN_THRESH;
1670     }
1671 
1672     if (in->valid_alert_actions & CXL_ALERTS_OVER_TEMP_WARN_THRESH) {
1673         /*
1674          * CXL r3.2 Table 8-149 The Device Over-Temperature Warning Threshold
1675          * shall be less than the Device Over-Temperature Critical
1676          * Alert Threshold.
1677          */
1678         if (in->over_temp_warn_thresh >=
1679             alert_config->over_temp_crit_alert_thresh) {
1680             return CXL_MBOX_INVALID_INPUT;
1681         }
1682         alert_config->over_temp_warn_thresh = in->over_temp_warn_thresh;
1683         alert_config->enable_alerts |= CXL_ALERTS_OVER_TEMP_WARN_THRESH;
1684     }
1685 
1686     if (in->valid_alert_actions & CXL_ALERTS_UNDER_TEMP_WARN_THRESH) {
1687         /*
1688          * CXL r3.2 Table 8-149 The Device Under-Temperature Warning Threshold
1689          * shall be higher than the Device Under-Temperature Critical
1690          * Alert Threshold.
1691          */
1692         if (in->under_temp_warn_thresh <=
1693             alert_config->under_temp_crit_alert_thresh) {
1694             return CXL_MBOX_INVALID_INPUT;
1695         }
1696         alert_config->under_temp_warn_thresh = in->under_temp_warn_thresh;
1697         alert_config->enable_alerts |= CXL_ALERTS_UNDER_TEMP_WARN_THRESH;
1698     }
1699 
1700     if (in->valid_alert_actions & CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH) {
1701         alert_config->cor_vmem_err_warn_thresh = in->cor_vmem_err_warn_thresh;
1702         alert_config->enable_alerts |= CXL_ALERTS_COR_VMEM_ERR_WARN_THRESH;
1703     }
1704 
1705     if (in->valid_alert_actions & CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH) {
1706         alert_config->cor_pmem_err_warn_thresh = in->cor_pmem_err_warn_thresh;
1707         alert_config->enable_alerts |= CXL_ALERTS_COR_PMEM_ERR_WARN_THRESH;
1708     }
1709     return CXL_MBOX_SUCCESS;
1710 }
1711 
1712 /* Perform the actual device zeroing */
1713 static void __do_sanitization(CXLType3Dev *ct3d)
1714 {
1715     MemoryRegion *mr;
1716 
1717     if (ct3d->hostvmem) {
1718         mr = host_memory_backend_get_memory(ct3d->hostvmem);
1719         if (mr) {
1720             void *hostmem = memory_region_get_ram_ptr(mr);
1721             memset(hostmem, 0, memory_region_size(mr));
1722         }
1723     }
1724 
1725     if (ct3d->hostpmem) {
1726         mr = host_memory_backend_get_memory(ct3d->hostpmem);
1727         if (mr) {
1728             void *hostmem = memory_region_get_ram_ptr(mr);
1729             memset(hostmem, 0, memory_region_size(mr));
1730         }
1731     }
1732     if (ct3d->lsa) {
1733         mr = host_memory_backend_get_memory(ct3d->lsa);
1734         if (mr) {
1735             void *lsa = memory_region_get_ram_ptr(mr);
1736             memset(lsa, 0, memory_region_size(mr));
1737         }
1738     }
1739     cxl_discard_all_event_records(&ct3d->cxl_dstate);
1740 }
1741 
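/*
 * Emulated sanitize duration in seconds for a given capacity in MiB: the
 * duration grows with capacity and is capped at 4 hours, e.g. 512 MiB -> 4 s,
 * 64 GiB -> 8 min, anything above 1 TiB -> 4 hrs.
 */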
1742 static int get_sanitize_duration(uint64_t total_mem)
1743 {
1744     int secs = 0;
1745 
1746     if (total_mem <= 512) {
1747         secs = 4;
1748     } else if (total_mem <= 1024) {
1749         secs = 8;
1750     } else if (total_mem <= 2 * 1024) {
1751         secs = 15;
1752     } else if (total_mem <= 4 * 1024) {
1753         secs = 30;
1754     } else if (total_mem <= 8 * 1024) {
1755         secs = 60;
1756     } else if (total_mem <= 16 * 1024) {
1757         secs = 2 * 60;
1758     } else if (total_mem <= 32 * 1024) {
1759         secs = 4 * 60;
1760     } else if (total_mem <= 64 * 1024) {
1761         secs = 8 * 60;
1762     } else if (total_mem <= 128 * 1024) {
1763         secs = 15 * 60;
1764     } else if (total_mem <= 256 * 1024) {
1765         secs = 30 * 60;
1766     } else if (total_mem <= 512 * 1024) {
1767         secs = 60 * 60;
1768     } else if (total_mem <= 1024 * 1024) {
1769         secs = 120 * 60;
1770     } else {
1771         secs = 240 * 60; /* max 4 hrs */
1772     }
1773 
1774     return secs;
1775 }
1776 
1777 /*
1778  * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
1779  *
1780  * Once the Sanitize command has started successfully, the device shall be
1781  * placed in the media disabled state. If the command fails or is interrupted
1782  * by a reset or power failure, it shall remain in the media disabled state
1783  * until a successful Sanitize command has been completed. During this state:
1784  *
1785  * 1. Memory writes to the device will have no effect, and all memory reads
1786  * will return random values (no user data returned, even for locations that
1787  * the failed Sanitize operation didn’t sanitize yet).
1788  *
1789  * 2. Mailbox commands shall still be processed in the disabled state, except
1790  * that commands that access Sanitized areas shall fail with the Media Disabled
1791  * error code.
1792  */
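/*
 * The emulation disables media access immediately and reports a background
 * operation whose runtime is derived from the combined volatile and
 * persistent capacity; the zeroing itself (__do_sanitization()) is deferred
 * until the background operation completes.
 */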
1793 static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
1794                                          uint8_t *payload_in,
1795                                          size_t len_in,
1796                                          uint8_t *payload_out,
1797                                          size_t *len_out,
1798                                          CXLCCI *cci)
1799 {
1800     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
1801     uint64_t total_mem; /* in MiB */
1802     int secs;
1803 
1804     total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
1805     secs = get_sanitize_duration(total_mem);
1806 
1807     /* Other background commands are rejected as busy (EBUSY) for now */
1808     cci->bg.runtime = secs * 1000UL;
1809     *len_out = 0;
1810 
1811     cxl_dev_disable_media(&ct3d->cxl_dstate);
1812 
1813     /* The actual zeroing is performed when the background operation completes */
1814     return CXL_MBOX_BG_STARTED;
1815 }
1816 
1817 struct dpa_range_list_entry {
1818     uint64_t starting_dpa;
1819     uint64_t length;
1820 } QEMU_PACKED;
1821 
1822 struct CXLSanitizeInfo {
1823     uint32_t dpa_range_count;
1824     uint8_t fill_value;
1825     struct dpa_range_list_entry dpa_range_list[];
1826 } QEMU_PACKED;
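/*
 * Pending Media Operations sanitize request: the fill value and DPA range
 * list are stashed in ct3d->media_op_sanitize by the command handler and
 * consumed (then freed) by __do_sanitize() when the background operation
 * runs.
 */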
1827 
1828 static uint64_t get_vmr_size(CXLType3Dev *ct3d, MemoryRegion **vmr)
1829 {
1830     MemoryRegion *mr;
1831     if (ct3d->hostvmem) {
1832         mr = host_memory_backend_get_memory(ct3d->hostvmem);
1833         if (vmr) {
1834             *vmr = mr;
1835         }
1836         return memory_region_size(mr);
1837     }
1838     return 0;
1839 }
1840 
1841 static uint64_t get_pmr_size(CXLType3Dev *ct3d, MemoryRegion **pmr)
1842 {
1843     MemoryRegion *mr;
1844     if (ct3d->hostpmem) {
1845         mr = host_memory_backend_get_memory(ct3d->hostpmem);
1846         if (pmr) {
1847             *pmr = mr;
1848         }
1849         return memory_region_size(mr);
1850     }
1851     return 0;
1852 }
1853 
1854 static uint64_t get_dc_size(CXLType3Dev *ct3d, MemoryRegion **dc_mr)
1855 {
1856     MemoryRegion *mr;
1857     if (ct3d->dc.host_dc) {
1858         mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
1859         if (dc_mr) {
1860             *dc_mr = mr;
1861         }
1862         return memory_region_size(mr);
1863     }
1864     return 0;
1865 }
1866 
1867 static int validate_dpa_addr(CXLType3Dev *ct3d, uint64_t dpa_addr,
1868                              size_t length)
1869 {
1870     uint64_t vmr_size, pmr_size, dc_size;
1871 
1872     if ((dpa_addr % CXL_CACHE_LINE_SIZE) ||
1873         (length % CXL_CACHE_LINE_SIZE)  ||
1874         (length <= 0)) {
1875         return -EINVAL;
1876     }
1877 
1878     vmr_size = get_vmr_size(ct3d, NULL);
1879     pmr_size = get_pmr_size(ct3d, NULL);
1880     dc_size = get_dc_size(ct3d, NULL);
1881 
1882     if (dpa_addr + length > vmr_size + pmr_size + dc_size) {
1883         return -EINVAL;
1884     }
1885 
1886     if (dpa_addr > vmr_size + pmr_size) {
1887         if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
1888             return -ENODEV;
1889         }
1890     }
1891 
1892     return 0;
1893 }
1894 
1895 static int sanitize_range(CXLType3Dev *ct3d, uint64_t dpa_addr, size_t length,
1896                           uint8_t fill_value)
1897 {
1898 
1899     uint64_t vmr_size, pmr_size;
1900     AddressSpace *as = NULL;
1901     MemTxAttrs mem_attrs = {};
1902 
1903     vmr_size = get_vmr_size(ct3d, NULL);
1904     pmr_size = get_pmr_size(ct3d, NULL);
1905 
1906     if (dpa_addr < vmr_size) {
1907         as = &ct3d->hostvmem_as;
1908     } else if (dpa_addr < vmr_size + pmr_size) {
1909         as = &ct3d->hostpmem_as;
1910     } else {
1911         if (!ct3_test_region_block_backed(ct3d, dpa_addr, length)) {
1912             return -ENODEV;
1913         }
1914         as = &ct3d->dc.host_dc_as;
1915     }
1916 
1917     return address_space_set(as, dpa_addr, fill_value, length, mem_attrs);
1918 }
1919 
1920 /* Perform the requested media operation (fill the ranges with the given value) */
1921 static void __do_sanitize(CXLType3Dev *ct3d)
1922 {
1923     struct CXLSanitizeInfo  *san_info = ct3d->media_op_sanitize;
1924     int dpa_range_count = san_info->dpa_range_count;
1925     int rc = 0;
1926     int i;
1927 
1928     for (i = 0; i < dpa_range_count; i++) {
1929         rc = sanitize_range(ct3d, san_info->dpa_range_list[i].starting_dpa,
1930                             san_info->dpa_range_list[i].length,
1931                             san_info->fill_value);
1932         if (rc) {
1933             goto exit;
1934         }
1935     }
1936 exit:
1937     g_free(ct3d->media_op_sanitize);
1938     ct3d->media_op_sanitize = NULL;
1939     return;
1940 }
1941 
1942 enum {
1943     MEDIA_OP_CLASS_GENERAL  = 0x0,
1944         #define MEDIA_OP_GEN_SUBC_DISCOVERY 0x0
1945     MEDIA_OP_CLASS_SANITIZE = 0x1,
1946         #define MEDIA_OP_SAN_SUBC_SANITIZE 0x0
1947         #define MEDIA_OP_SAN_SUBC_ZERO 0x1
1948 };
1949 
1950 struct media_op_supported_list_entry {
1951     uint8_t media_op_class;
1952     uint8_t media_op_subclass;
1953 };
1954 
1955 struct media_op_discovery_out_pl {
1956     uint64_t dpa_range_granularity;
1957     uint16_t total_supported_operations;
1958     uint16_t num_of_supported_operations;
1959     struct media_op_supported_list_entry entry[];
1960 } QEMU_PACKED;
1961 
1962 static const struct media_op_supported_list_entry media_op_matrix[] = {
1963     { MEDIA_OP_CLASS_GENERAL, MEDIA_OP_GEN_SUBC_DISCOVERY },
1964     { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_SANITIZE },
1965     { MEDIA_OP_CLASS_SANITIZE, MEDIA_OP_SAN_SUBC_ZERO },
1966 };
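/* Class/subclass pairs advertised by the Media Operations discovery subcommand */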
1967 
1968 static CXLRetCode media_operations_discovery(uint8_t *payload_in,
1969                                              size_t len_in,
1970                                              uint8_t *payload_out,
1971                                              size_t *len_out)
1972 {
1973     struct {
1974         uint8_t media_operation_class;
1975         uint8_t media_operation_subclass;
1976         uint8_t rsvd[2];
1977         uint32_t dpa_range_count;
1978         struct {
1979             uint16_t start_index;
1980             uint16_t num_ops;
1981         } discovery_osa;
1982     } QEMU_PACKED *media_op_in_disc_pl = (void *)payload_in;
1983     struct media_op_discovery_out_pl *media_out_pl =
1984         (struct media_op_discovery_out_pl *)payload_out;
1985     int num_ops, start_index, i;
1986     int count = 0;
1987 
1988     if (len_in < sizeof(*media_op_in_disc_pl)) {
1989         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
1990     }
1991 
1992     num_ops = media_op_in_disc_pl->discovery_osa.num_ops;
1993     start_index = media_op_in_disc_pl->discovery_osa.start_index;
1994 
1995     /*
1996      * Per CXL r3.2 section 8.2.10.9.5.3, dpa_range_count must be zero and the
1997      * start index must not exceed the total number of entries for the
1998      * discovery subclass command.
1999      */
2000     if (media_op_in_disc_pl->dpa_range_count ||
2001         start_index > ARRAY_SIZE(media_op_matrix)) {
2002         return CXL_MBOX_INVALID_INPUT;
2003     }
2004 
2005     media_out_pl->dpa_range_granularity = CXL_CACHE_LINE_SIZE;
2006     media_out_pl->total_supported_operations =
2007                                      ARRAY_SIZE(media_op_matrix);
2008     if (num_ops > 0) {
2009         for (i = start_index; i < start_index + num_ops; i++) {
2010             media_out_pl->entry[count].media_op_class =
2011                     media_op_matrix[i].media_op_class;
2012             media_out_pl->entry[count].media_op_subclass =
2013                         media_op_matrix[i].media_op_subclass;
2014             count++;
2015             if (count == num_ops) {
2016                 break;
2017             }
2018         }
2019     }
2020 
2021     media_out_pl->num_of_supported_operations = count;
2022     *len_out = sizeof(*media_out_pl) + count * sizeof(*media_out_pl->entry);
2023     return CXL_MBOX_SUCCESS;
2024 }
2025 
2026 static CXLRetCode media_operations_sanitize(CXLType3Dev *ct3d,
2027                                             uint8_t *payload_in,
2028                                             size_t len_in,
2029                                             uint8_t *payload_out,
2030                                             size_t *len_out,
2031                                             uint8_t fill_value,
2032                                             CXLCCI *cci)
2033 {
2034     struct media_operations_sanitize {
2035         uint8_t media_operation_class;
2036         uint8_t media_operation_subclass;
2037         uint8_t rsvd[2];
2038         uint32_t dpa_range_count;
2039         struct dpa_range_list_entry dpa_range_list[];
2040     } QEMU_PACKED *media_op_in_sanitize_pl = (void *)payload_in;
2041     uint32_t dpa_range_count = media_op_in_sanitize_pl->dpa_range_count;
2042     uint64_t total_mem = 0;
2043     size_t dpa_range_list_size;
2044     int secs = 0, i;
2045 
2046     if (dpa_range_count == 0) {
2047         return CXL_MBOX_SUCCESS;
2048     }
2049 
2050     dpa_range_list_size = dpa_range_count * sizeof(struct dpa_range_list_entry);
2051     if (len_in < (sizeof(*media_op_in_sanitize_pl) + dpa_range_list_size)) {
2052         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
2053     }
2054 
2055     for (i = 0; i < dpa_range_count; i++) {
2056         uint64_t start_dpa =
2057             media_op_in_sanitize_pl->dpa_range_list[i].starting_dpa;
2058         uint64_t length = media_op_in_sanitize_pl->dpa_range_list[i].length;
2059 
2060         if (validate_dpa_addr(ct3d, start_dpa, length)) {
2061             return CXL_MBOX_INVALID_INPUT;
2062         }
2063         total_mem += length;
2064     }
2065     ct3d->media_op_sanitize = g_malloc0(sizeof(struct CXLSanitizeInfo) +
2066                                         dpa_range_list_size);
2067 
2068     ct3d->media_op_sanitize->dpa_range_count = dpa_range_count;
2069     ct3d->media_op_sanitize->fill_value = fill_value;
2070     memcpy(ct3d->media_op_sanitize->dpa_range_list,
2071            media_op_in_sanitize_pl->dpa_range_list,
2072            dpa_range_list_size);
2073     secs = get_sanitize_duration(total_mem >> 20);
2074 
2075     /* Other background commands are rejected as busy (EBUSY) for now */
2076     cci->bg.runtime = secs * 1000UL;
2077     *len_out = 0;
2078     /*
2079      * media op sanitize is targeted so no need to disable media or
2080      * clear event logs
2081      */
2082     return CXL_MBOX_BG_STARTED;
2083 }
2084 
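/*
 * Media Operations dispatcher: the General class only supports Discovery,
 * which reports the operations above, while the Sanitize class overwrites
 * the supplied DPA ranges as a background operation (fill value 0xF for
 * Sanitize, 0 for Zero).
 */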
2085 static CXLRetCode cmd_media_operations(const struct cxl_cmd *cmd,
2086                                        uint8_t *payload_in,
2087                                        size_t len_in,
2088                                        uint8_t *payload_out,
2089                                        size_t *len_out,
2090                                        CXLCCI *cci)
2091 {
2092     struct {
2093         uint8_t media_operation_class;
2094         uint8_t media_operation_subclass;
2095         uint8_t rsvd[2];
2096         uint32_t dpa_range_count;
2097     } QEMU_PACKED *media_op_in_common_pl = (void *)payload_in;
2098     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2099     uint8_t media_op_cl = 0;
2100     uint8_t media_op_subclass = 0;
2101 
2102     if (len_in < sizeof(*media_op_in_common_pl)) {
2103         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
2104     }
2105 
2106     media_op_cl = media_op_in_common_pl->media_operation_class;
2107     media_op_subclass = media_op_in_common_pl->media_operation_subclass;
2108 
2109     switch (media_op_cl) {
2110     case MEDIA_OP_CLASS_GENERAL:
2111         if (media_op_subclass != MEDIA_OP_GEN_SUBC_DISCOVERY) {
2112             return CXL_MBOX_UNSUPPORTED;
2113         }
2114 
2115         return media_operations_discovery(payload_in, len_in, payload_out,
2116                                              len_out);
2117     case MEDIA_OP_CLASS_SANITIZE:
2118         switch (media_op_subclass) {
2119         case MEDIA_OP_SAN_SUBC_SANITIZE:
2120             return media_operations_sanitize(ct3d, payload_in, len_in,
2121                                              payload_out, len_out, 0xF,
2122                                              cci);
2123         case MEDIA_OP_SAN_SUBC_ZERO:
2124             return media_operations_sanitize(ct3d, payload_in, len_in,
2125                                              payload_out, len_out, 0,
2126                                              cci);
2127         default:
2128             return CXL_MBOX_UNSUPPORTED;
2129         }
2130     default:
2131         return CXL_MBOX_UNSUPPORTED;
2132     }
2133 }
2134 
2135 static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
2136                                          uint8_t *payload_in,
2137                                          size_t len_in,
2138                                          uint8_t *payload_out,
2139                                          size_t *len_out,
2140                                          CXLCCI *cci)
2141 {
2142     uint32_t *state = (uint32_t *)payload_out;
2143 
2144     *state = 0;
2145     *len_out = 4;
2146     return CXL_MBOX_SUCCESS;
2147 }
2148 
2149 /*
2150  * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
2151  *
2152  * This is very inefficient, but good enough for now!
2153  * Also the payload will always fit, so no need to handle the MORE flag and
2154  * make this stateful. We may want to allow longer poison lists to aid
2155  * testing of that kernel functionality.
2156  */
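/*
 * Each returned record packs the poison type into the low 3 bits of the
 * 64-byte-aligned DPA and reports the length in units of 64-byte cache
 * lines, clipped to the queried range.
 */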
2157 static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
2158                                             uint8_t *payload_in,
2159                                             size_t len_in,
2160                                             uint8_t *payload_out,
2161                                             size_t *len_out,
2162                                             CXLCCI *cci)
2163 {
2164     struct get_poison_list_pl {
2165         uint64_t pa;
2166         uint64_t length;
2167     } QEMU_PACKED;
2168 
2169     struct get_poison_list_out_pl {
2170         uint8_t flags;
2171         uint8_t rsvd1;
2172         uint64_t overflow_timestamp;
2173         uint16_t count;
2174         uint8_t rsvd2[0x14];
2175         struct {
2176             uint64_t addr;
2177             uint32_t length;
2178             uint32_t resv;
2179         } QEMU_PACKED records[];
2180     } QEMU_PACKED;
2181 
2182     struct get_poison_list_pl *in = (void *)payload_in;
2183     struct get_poison_list_out_pl *out = (void *)payload_out;
2184     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2185     uint16_t record_count = 0, i = 0;
2186     uint64_t query_start, query_length;
2187     CXLPoisonList *poison_list = &ct3d->poison_list;
2188     CXLPoison *ent;
2189     uint16_t out_pl_len;
2190 
2191     query_start = ldq_le_p(&in->pa);
2192     /* 64 byte alignment required */
2193     if (query_start & 0x3f) {
2194         return CXL_MBOX_INVALID_INPUT;
2195     }
2196     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2197 
2198     QLIST_FOREACH(ent, poison_list, node) {
2199         /* Check for no overlap */
2200         if (!ranges_overlap(ent->start, ent->length,
2201                             query_start, query_length)) {
2202             continue;
2203         }
2204         record_count++;
2205     }
2206     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2207     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2208 
2209     QLIST_FOREACH(ent, poison_list, node) {
2210         uint64_t start, stop;
2211 
2212         /* Check for no overlap */
2213         if (!ranges_overlap(ent->start, ent->length,
2214                             query_start, query_length)) {
2215             continue;
2216         }
2217 
2218         /* Deal with overlap */
2219         start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
2220         stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
2221                    query_start + query_length);
2222         stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
2223         stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
2224         i++;
2225     }
2226     if (ct3d->poison_list_overflowed) {
2227         out->flags = (1 << 1);
2228         stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
2229     }
2230     if (scan_media_running(cci)) {
2231         out->flags |= (1 << 2);
2232     }
2233 
2234     stw_le_p(&out->count, record_count);
2235     *len_out = out_pl_len;
2236     return CXL_MBOX_SUCCESS;
2237 }
2238 
2239 /* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
2240 static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
2241                                           uint8_t *payload_in,
2242                                           size_t len_in,
2243                                           uint8_t *payload_out,
2244                                           size_t *len_out,
2245                                           CXLCCI *cci)
2246 {
2247     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2248     CXLPoisonList *poison_list = &ct3d->poison_list;
2249     CXLPoison *ent;
2250     struct inject_poison_pl {
2251         uint64_t dpa;
2252     };
2253     struct inject_poison_pl *in = (void *)payload_in;
2254     uint64_t dpa = ldq_le_p(&in->dpa);
2255     CXLPoison *p;
2256 
2257     QLIST_FOREACH(ent, poison_list, node) {
2258         if (dpa >= ent->start &&
2259             dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
2260             return CXL_MBOX_SUCCESS;
2261         }
2262     }
2263     /*
2264      * Freeze the list if there is an on-going scan media operation.
2265      */
2266     if (scan_media_running(cci)) {
2267         /*
2268          * XXX: Spec is ambiguous - is this case considered
2269          * a successful return despite not adding to the list?
2270          */
2271         goto success;
2272     }
2273 
2274     if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
2275         return CXL_MBOX_INJECT_POISON_LIMIT;
2276     }
2277     p = g_new0(CXLPoison, 1);
2278 
2279     p->length = CXL_CACHE_LINE_SIZE;
2280     p->start = dpa;
2281     p->type = CXL_POISON_TYPE_INJECTED;
2282 
2283     /*
2284      * Possible todo: Merge with existing entry if next to it and if same type
2285      */
2286     QLIST_INSERT_HEAD(poison_list, p, node);
2287     ct3d->poison_list_cnt++;
2288 success:
2289     *len_out = 0;
2290 
2291     return CXL_MBOX_SUCCESS;
2292 }
2293 
2294 /* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
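/*
 * Clearing rewrites the targeted 64-byte cacheline (when the class provides
 * set_cacheline); if that line sits inside a larger poison entry, the entry
 * is removed and up to two fragments covering the remainder are reinserted.
 */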
2295 static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
2296                                          uint8_t *payload_in,
2297                                          size_t len_in,
2298                                          uint8_t *payload_out,
2299                                          size_t *len_out,
2300                                          CXLCCI *cci)
2301 {
2302     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2303     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2304     CXLPoisonList *poison_list = &ct3d->poison_list;
2305     CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
2306     struct clear_poison_pl {
2307         uint64_t dpa;
2308         uint8_t data[64];
2309     };
2310     CXLPoison *ent;
2311     uint64_t dpa;
2312 
2313     struct clear_poison_pl *in = (void *)payload_in;
2314 
2315     dpa = ldq_le_p(&in->dpa);
2316     if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
2317         ct3d->dc.total_capacity) {
2318         return CXL_MBOX_INVALID_PA;
2319     }
2320 
2321     /* Clearing a region with no poison is not an error so always do so */
2322     if (cvc->set_cacheline) {
2323         if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
2324             return CXL_MBOX_INTERNAL_ERROR;
2325         }
2326     }
2327 
2328     /*
2329      * Freeze the list if there is an on-going scan media operation.
2330      */
2331     if (scan_media_running(cci)) {
2332         /*
2333          * XXX: Spec is ambiguous - is this case considered
2334          * a successful return despite not removing from the list?
2335          */
2336         goto success;
2337     }
2338 
2339     QLIST_FOREACH(ent, poison_list, node) {
2340         /*
2341          * Test for contained in entry. Simpler than general case
2342          * as clearing 64 bytes and entries 64 byte aligned
2343          */
2344         if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
2345             break;
2346         }
2347     }
2348     if (!ent) {
2349         goto success;
2350     }
2351 
2352     QLIST_REMOVE(ent, node);
2353     ct3d->poison_list_cnt--;
2354 
2355     if (dpa > ent->start) {
2356         CXLPoison *frag;
2357         /* Cannot overflow as replacing existing entry */
2358 
2359         frag = g_new0(CXLPoison, 1);
2360 
2361         frag->start = ent->start;
2362         frag->length = dpa - ent->start;
2363         frag->type = ent->type;
2364 
2365         QLIST_INSERT_HEAD(poison_list, frag, node);
2366         ct3d->poison_list_cnt++;
2367     }
2368 
2369     if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
2370         CXLPoison *frag;
2371 
2372         if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
2373             cxl_set_poison_list_overflowed(ct3d);
2374         } else {
2375             frag = g_new0(CXLPoison, 1);
2376 
2377             frag->start = dpa + CXL_CACHE_LINE_SIZE;
2378             frag->length = ent->start + ent->length - frag->start;
2379             frag->type = ent->type;
2380             QLIST_INSERT_HEAD(poison_list, frag, node);
2381             ct3d->poison_list_cnt++;
2382         }
2383     }
2384     /* Any fragments have been added; free the original entry */
2385     g_free(ent);
2386 success:
2387     *len_out = 0;
2388 
2389     return CXL_MBOX_SUCCESS;
2390 }
2391 
2392 /*
2393  * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
2394  */
2395 static CXLRetCode
2396 cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
2397                                       uint8_t *payload_in,
2398                                       size_t len_in,
2399                                       uint8_t *payload_out,
2400                                       size_t *len_out,
2401                                       CXLCCI *cci)
2402 {
2403     struct get_scan_media_capabilities_pl {
2404         uint64_t pa;
2405         uint64_t length;
2406     } QEMU_PACKED;
2407 
2408     struct get_scan_media_capabilities_out_pl {
2409         uint32_t estimated_runtime_ms;
2410     };
2411 
2412     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2413     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2414     struct get_scan_media_capabilities_pl *in = (void *)payload_in;
2415     struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
2416     uint64_t query_start;
2417     uint64_t query_length;
2418 
2419     query_start = ldq_le_p(&in->pa);
2420     /* 64 byte alignment required */
2421     if (query_start & 0x3f) {
2422         return CXL_MBOX_INVALID_INPUT;
2423     }
2424     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2425 
2426     if (query_start + query_length > cxl_dstate->static_mem_size) {
2427         return CXL_MBOX_INVALID_PA;
2428     }
2429 
2430     /*
2431      * Just use 400 nanosecond access/read latency + 100 ns for
2432      * the cost of updating the poison list. For small enough
2433      * chunks return at least 1 ms.
2434      */
2435     stl_le_p(&out->estimated_runtime_ms,
2436              MAX(1, query_length * (0.0005L / 64)));
2437 
2438     *len_out = sizeof(*out);
2439     return CXL_MBOX_SUCCESS;
2440 }
2441 
2442 static void __do_scan_media(CXLType3Dev *ct3d)
2443 {
2444     CXLPoison *ent;
2445     unsigned int results_cnt = 0;
2446 
2447     QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
2448         results_cnt++;
2449     }
2450 
2451     /* only scan media may clear the overflow */
2452     if (ct3d->poison_list_overflowed &&
2453         ct3d->poison_list_cnt == results_cnt) {
2454         cxl_clear_poison_list_overflowed(ct3d);
2455     }
2456     /* scan media has run since last conventional reset */
2457     ct3d->scan_media_hasrun = true;
2458 }
2459 
2460 /*
2461  * CXL r3.1 section 8.2.9.9.4.5: Scan Media
2462  */
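/*
 * The emulation services a scan from the backup poison list: entries that
 * overlap the requested range are copied into scan_media_results (and back
 * into the live poison list while space remains), and the background runtime
 * is estimated from the scanned length.
 */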
2463 static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
2464                                        uint8_t *payload_in,
2465                                        size_t len_in,
2466                                        uint8_t *payload_out,
2467                                        size_t *len_out,
2468                                        CXLCCI *cci)
2469 {
2470     struct scan_media_pl {
2471         uint64_t pa;
2472         uint64_t length;
2473         uint8_t flags;
2474     } QEMU_PACKED;
2475 
2476     struct scan_media_pl *in = (void *)payload_in;
2477     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2478     CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
2479     uint64_t query_start;
2480     uint64_t query_length;
2481     CXLPoison *ent, *next;
2482 
2483     query_start = ldq_le_p(&in->pa);
2484     /* 64 byte alignment required */
2485     if (query_start & 0x3f) {
2486         return CXL_MBOX_INVALID_INPUT;
2487     }
2488     query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;
2489 
2490     if (query_start + query_length > cxl_dstate->static_mem_size) {
2491         return CXL_MBOX_INVALID_PA;
2492     }
2493     if (ct3d->dc.num_regions && query_start + query_length >=
2494             cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
2495         return CXL_MBOX_INVALID_PA;
2496     }
2497 
2498     if (in->flags == 0) { /* TODO */
2499         qemu_log_mask(LOG_UNIMP,
2500                       "Scan Media Event Log is unsupported\n");
2501     }
2502 
2503     /* any previous results are discarded upon a new Scan Media */
2504     QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
2505         QLIST_REMOVE(ent, node);
2506         g_free(ent);
2507     }
2508 
2509     /* kill the poison list - it will be recreated */
2510     if (ct3d->poison_list_overflowed) {
2511         QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
2512             QLIST_REMOVE(ent, node);
2513             g_free(ent);
2514             ct3d->poison_list_cnt--;
2515         }
2516     }
2517 
2518     /*
2519      * Scan the backup list and move corresponding entries
2520      * into the results list, updating the poison list
2521      * when possible.
2522      */
2523     QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
2524         CXLPoison *res;
2525 
2526         if (ent->start >= query_start + query_length ||
2527             ent->start + ent->length <= query_start) {
2528             continue;
2529         }
2530 
2531         /*
2532          * If a Get Poison List cmd comes in while this
2533          * scan is being done, it will see the new complete
2534          * list, while setting the respective flag.
2535          */
2536         if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
2537             CXLPoison *p = g_new0(CXLPoison, 1);
2538 
2539             p->start = ent->start;
2540             p->length = ent->length;
2541             p->type = ent->type;
2542             QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
2543             ct3d->poison_list_cnt++;
2544         }
2545 
2546         res = g_new0(CXLPoison, 1);
2547         res->start = ent->start;
2548         res->length = ent->length;
2549         res->type = ent->type;
2550         QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);
2551 
2552         QLIST_REMOVE(ent, node);
2553         g_free(ent);
2554     }
2555 
2556     cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
2557     *len_out = 0;
2558 
2559     return CXL_MBOX_BG_STARTED;
2560 }
2561 
2562 /*
2563  * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
2564  */
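/*
 * Results are consumed as they are returned; if more entries remain than fit
 * in one payload, the "More Media Error Records" flag is set so the caller
 * can issue the command again.
 */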
2565 static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
2566                                                    uint8_t *payload_in,
2567                                                    size_t len_in,
2568                                                    uint8_t *payload_out,
2569                                                    size_t *len_out,
2570                                                    CXLCCI *cci)
2571 {
2572     struct get_scan_media_results_out_pl {
2573         uint64_t dpa_restart;
2574         uint64_t length;
2575         uint8_t flags;
2576         uint8_t rsvd1;
2577         uint16_t count;
2578         uint8_t rsvd2[0xc];
2579         struct {
2580             uint64_t addr;
2581             uint32_t length;
2582             uint32_t resv;
2583         } QEMU_PACKED records[];
2584     } QEMU_PACKED;
2585 
2586     struct get_scan_media_results_out_pl *out = (void *)payload_out;
2587     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2588     CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
2589     CXLPoison *ent, *next;
2590     uint16_t total_count = 0, record_count = 0, i = 0;
2591     uint16_t out_pl_len;
2592 
2593     if (!ct3d->scan_media_hasrun) {
2594         return CXL_MBOX_UNSUPPORTED;
2595     }
2596 
2597     /*
2598      * Calculate limits; all entries fall within the address range of the
2599      * last Scan Media call.
2600      */
2601     QLIST_FOREACH(ent, scan_media_results, node) {
2602         size_t rec_size = record_count * sizeof(out->records[0]);
2603 
2604         if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
2605             record_count++;
2606         }
2607         total_count++;
2608     }
2609 
2610     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2611     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2612 
2613     memset(out, 0, out_pl_len);
2614     QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
2615         uint64_t start, stop;
2616 
2617         if (i == record_count) {
2618             break;
2619         }
2620 
2621         start = ROUND_DOWN(ent->start, 64ull);
2622         stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
2623         stq_le_p(&out->records[i].addr, start);
2624         stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
2625         i++;
2626 
2627         /* consume the returning entry */
2628         QLIST_REMOVE(ent, node);
2629         g_free(ent);
2630     }
2631 
2632     stw_le_p(&out->count, record_count);
2633     if (total_count > record_count) {
2634         out->flags = (1 << 0); /* More Media Error Records */
2635     }
2636 
2637     *len_out = out_pl_len;
2638     return CXL_MBOX_SUCCESS;
2639 }
2640 
2641 /*
2642  * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
2643  * (Opcode: 4800h)
2644  */
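/*
 * Output layout: the requested region records are followed immediately by
 * the extents/tags supported and available counts, so that trailing block
 * moves with the number of records returned.
 */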
2645 static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
2646                                              uint8_t *payload_in,
2647                                              size_t len_in,
2648                                              uint8_t *payload_out,
2649                                              size_t *len_out,
2650                                              CXLCCI *cci)
2651 {
2652     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2653     struct {
2654         uint8_t region_cnt;
2655         uint8_t start_rid;
2656     } QEMU_PACKED *in = (void *)payload_in;
2657     struct {
2658         uint8_t num_regions;
2659         uint8_t regions_returned;
2660         uint8_t rsvd1[6];
2661         struct {
2662             uint64_t base;
2663             uint64_t decode_len;
2664             uint64_t region_len;
2665             uint64_t block_size;
2666             uint32_t dsmadhandle;
2667             uint8_t flags;
2668             uint8_t rsvd2[3];
2669         } QEMU_PACKED records[];
2670     } QEMU_PACKED *out = (void *)payload_out;
2671     struct {
2672         uint32_t num_extents_supported;
2673         uint32_t num_extents_available;
2674         uint32_t num_tags_supported;
2675         uint32_t num_tags_available;
2676     } QEMU_PACKED *extra_out;
2677     uint16_t record_count;
2678     uint16_t i;
2679     uint16_t out_pl_len;
2680     uint8_t start_rid;
2681 
2682     start_rid = in->start_rid;
2683     if (start_rid >= ct3d->dc.num_regions) {
2684         return CXL_MBOX_INVALID_INPUT;
2685     }
2686 
2687     record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);
2688 
2689     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2690     extra_out = (void *)(payload_out + out_pl_len);
2691     out_pl_len += sizeof(*extra_out);
2692     assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
2693 
2694     out->num_regions = ct3d->dc.num_regions;
2695     out->regions_returned = record_count;
2696     for (i = 0; i < record_count; i++) {
2697         stq_le_p(&out->records[i].base,
2698                  ct3d->dc.regions[start_rid + i].base);
2699         stq_le_p(&out->records[i].decode_len,
2700                  ct3d->dc.regions[start_rid + i].decode_len /
2701                  CXL_CAPACITY_MULTIPLIER);
2702         stq_le_p(&out->records[i].region_len,
2703                  ct3d->dc.regions[start_rid + i].len);
2704         stq_le_p(&out->records[i].block_size,
2705                  ct3d->dc.regions[start_rid + i].block_size);
2706         stl_le_p(&out->records[i].dsmadhandle,
2707                  ct3d->dc.regions[start_rid + i].dsmadhandle);
2708         out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
2709     }
2710     /*
2711      * TODO: Assign values once extents and tags are introduced
2712      * to use.
2713      */
2714     stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
2715     stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
2716              ct3d->dc.total_extent_count);
2717     stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
2718     stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);
2719 
2720     *len_out = out_pl_len;
2721     return CXL_MBOX_SUCCESS;
2722 }
2723 
2724 /*
2725  * CXL r3.1 section 8.2.9.9.9.2:
2726  * Get Dynamic Capacity Extent List (Opcode 4801h)
2727  */
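/*
 * The extent list is returned in pages: the caller supplies a starting
 * extent index and a count, and the reply is additionally clamped so it fits
 * within CXL_MAILBOX_MAX_PAYLOAD_SIZE.
 */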
2728 static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
2729                                                uint8_t *payload_in,
2730                                                size_t len_in,
2731                                                uint8_t *payload_out,
2732                                                size_t *len_out,
2733                                                CXLCCI *cci)
2734 {
2735     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
2736     struct {
2737         uint32_t extent_cnt;
2738         uint32_t start_extent_id;
2739     } QEMU_PACKED *in = (void *)payload_in;
2740     struct {
2741         uint32_t count;
2742         uint32_t total_extents;
2743         uint32_t generation_num;
2744         uint8_t rsvd[4];
2745         CXLDCExtentRaw records[];
2746     } QEMU_PACKED *out = (void *)payload_out;
2747     uint32_t start_extent_id = in->start_extent_id;
2748     CXLDCExtentList *extent_list = &ct3d->dc.extents;
2749     uint16_t record_count = 0, i = 0, record_done = 0;
2750     uint16_t out_pl_len, size;
2751     CXLDCExtent *ent;
2752 
2753     if (start_extent_id > ct3d->dc.total_extent_count) {
2754         return CXL_MBOX_INVALID_INPUT;
2755     }
2756 
2757     record_count = MIN(in->extent_cnt,
2758                        ct3d->dc.total_extent_count - start_extent_id);
2759     size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
2760     record_count = MIN(record_count, size / sizeof(out->records[0]));
2761     out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
2762 
2763     stl_le_p(&out->count, record_count);
2764     stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
2765     stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);
2766 
2767     if (record_count > 0) {
2768         CXLDCExtentRaw *out_rec = &out->records[record_done];
2769 
2770         QTAILQ_FOREACH(ent, extent_list, node) {
2771             if (i++ < start_extent_id) {
2772                 continue;
2773             }
2774             stq_le_p(&out_rec->start_dpa, ent->start_dpa);
2775             stq_le_p(&out_rec->len, ent->len);
2776             memcpy(&out_rec->tag, ent->tag, 0x10);
2777             stw_le_p(&out_rec->shared_seq, ent->shared_seq);
2778 
2779             record_done++;
2780             out_rec++;
2781             if (record_done == record_count) {
2782                 break;
2783             }
2784         }
2785     }
2786 
2787     *len_out = out_pl_len;
2788     return CXL_MBOX_SUCCESS;
2789 }
2790 
2791 /*
2792  * Check whether any bit of addr in the range [nr, nr + size) is set;
2793  * return true if at least one bit is set, false otherwise.
2794  */
2795 bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
2796                               unsigned long size)
2797 {
2798     unsigned long res = find_next_bit(addr, size + nr, nr);
2799 
2800     return res < nr + size;
2801 }
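
     /*
      * Illustrative usage (see cxl_detect_malformed_extent_list() below): with
      * a bitmap where each bit represents one min_block_size chunk of DPA
      * space,
      *   test_any_bits_set(blk_bitmap, offset / min_block_size,
      *                     len / min_block_size)
      * reports whether any block in the [offset, offset + len) window has
      * already been claimed.
      */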
2802 
2803 CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
2804 {
2805     int i;
2806     CXLDCRegion *region = &ct3d->dc.regions[0];
2807 
2808     if (dpa < region->base ||
2809         dpa >= region->base + ct3d->dc.total_capacity) {
2810         return NULL;
2811     }
2812 
2813     /*
2814      * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
2815      *
2816      * Regions are used in increasing-DPA order, with Region 0 being used for
2817      * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
2818      * So check from the last region to find where the dpa belongs. Extents that
2819      * cross multiple regions are not allowed.
2820      */
2821     for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
2822         region = &ct3d->dc.regions[i];
2823         if (dpa >= region->base) {
2824             if (dpa + len > region->base + region->len) {
2825                 return NULL;
2826             }
2827             return region;
2828         }
2829     }
2830 
2831     return NULL;
2832 }
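
     /*
      * Illustrative example: with region 0 at [base0, base0 + len0) and
      * region 1 stacked directly above it, a DPA inside region 1 is matched on
      * the first iteration of the reverse scan above; a request whose
      * [dpa, dpa + len) runs past the end of the matched region returns NULL,
      * since extents may not span regions.
      */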
2833 
2834 void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
2835                                              uint64_t dpa,
2836                                              uint64_t len,
2837                                              uint8_t *tag,
2838                                              uint16_t shared_seq)
2839 {
2840     CXLDCExtent *extent;
2841 
2842     extent = g_new0(CXLDCExtent, 1);
2843     extent->start_dpa = dpa;
2844     extent->len = len;
2845     if (tag) {
2846         memcpy(extent->tag, tag, 0x10);
2847     }
2848     extent->shared_seq = shared_seq;
2849 
2850     QTAILQ_INSERT_TAIL(list, extent, node);
2851 }
2852 
2853 void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
2854                                         CXLDCExtent *extent)
2855 {
2856     QTAILQ_REMOVE(list, extent, node);
2857     g_free(extent);
2858 }
2859 
2860 /*
2861  * Add a new extent to the extent "group" if the group exists;
2862  * otherwise, create a new group and add the extent to it.
2863  * Return value: the extent group into which the extent was inserted.
2864  */
2865 CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
2866                                                     uint64_t dpa,
2867                                                     uint64_t len,
2868                                                     uint8_t *tag,
2869                                                     uint16_t shared_seq)
2870 {
2871     if (!group) {
2872         group = g_new0(CXLDCExtentGroup, 1);
2873         QTAILQ_INIT(&group->list);
2874     }
2875     cxl_insert_extent_to_extent_list(&group->list, dpa, len,
2876                                      tag, shared_seq);
2877     return group;
2878 }
2879 
2880 void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
2881                                        CXLDCExtentGroup *group)
2882 {
2883     QTAILQ_INSERT_TAIL(list, group, node);
2884 }
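
     /*
      * Illustrative call pattern (a sketch of the expected usage, not a fixed
      * API contract): a caller building one "add capacity" event typically
      * accumulates its extents with repeated
      * cxl_insert_extent_to_extent_group() calls, passing NULL for the group
      * on the first call, then queues the finished group with
      * cxl_extent_group_list_insert_tail(); cmd_dcd_add_dyn_cap_rsp() later
      * consumes groups from the head of that pending list.
      */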
2885 
2886 void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
2887 {
2888     CXLDCExtent *ent, *ent_next;
2889     CXLDCExtentGroup *group = QTAILQ_FIRST(list);
2890 
2891     QTAILQ_REMOVE(list, group, node);
2892     QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
2893         cxl_remove_extent_from_extent_list(&group->list, ent);
2894     }
2895     g_free(group);
2896 }
2897 
2898 /*
2899  * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
2900  * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
2901  */
2902 typedef struct CXLUpdateDCExtentListInPl {
2903     uint32_t num_entries_updated;
2904     uint8_t flags;
2905     uint8_t rsvd[3];
2906     /* CXL r3.1 Table 8-169: Updated Extent */
2907     struct {
2908         uint64_t start_dpa;
2909         uint64_t len;
2910         uint8_t rsvd[8];
2911     } QEMU_PACKED updated_entries[];
2912 } QEMU_PACKED CXLUpdateDCExtentListInPl;
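
     /*
      * Example payload (illustrative only): to release a single extent, the
      * host sends num_entries_updated = 1 and one updated_entries[] record
      * carrying its start_dpa and len; the handlers below therefore require
      * len_in >= sizeof(CXLUpdateDCExtentListInPl) +
      * num_entries_updated * sizeof(updated_entries[0]).
      */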
2913 
2914 /*
2915  * Check whether the extents in the list to operate on are valid:
2916  * 1. The extent should be in the range of a valid DC region;
2917  * 2. The extent should not cross multiple regions;
2918  * 3. The start DPA and the length of the extent should align with the block
2919  * size of the region;
2920  * 4. The extents in the list must not overlap one another (examples below).
2921  */
2922 static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
2923         const CXLUpdateDCExtentListInPl *in)
2924 {
2925     uint64_t min_block_size = UINT64_MAX;
2926     CXLDCRegion *region;
2927     CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
2928     g_autofree unsigned long *blk_bitmap = NULL;
2929     uint64_t dpa, len;
2930     uint32_t i;
2931 
2932     for (i = 0; i < ct3d->dc.num_regions; i++) {
2933         region = &ct3d->dc.regions[i];
2934         min_block_size = MIN(min_block_size, region->block_size);
2935     }
2936 
2937     blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
2938                              ct3d->dc.regions[0].base) / min_block_size);
2939 
2940     for (i = 0; i < in->num_entries_updated; i++) {
2941         dpa = in->updated_entries[i].start_dpa;
2942         len = in->updated_entries[i].len;
2943 
2944         region = cxl_find_dc_region(ct3d, dpa, len);
2945         if (!region) {
2946             return CXL_MBOX_INVALID_PA;
2947         }
2948 
2949         dpa -= ct3d->dc.regions[0].base;
2950         if (dpa % region->block_size || len % region->block_size) {
2951             return CXL_MBOX_INVALID_EXTENT_LIST;
2952         }
2953         /* The DPA range is already covered by other extents in the list */
2954         if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
2955             len / min_block_size)) {
2956             return CXL_MBOX_INVALID_EXTENT_LIST;
2957         }
2958         bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
2959     }
2960 
2961     return CXL_MBOX_SUCCESS;
2962 }
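
     /*
      * Illustrative failure cases for the checks above: an entry whose range
      * falls outside every configured DC region (or crosses a region boundary)
      * fails checks 1/2 and returns CXL_MBOX_INVALID_PA; a start_dpa or len
      * not aligned to the owning region's block_size fails check 3; two
      * entries in the same payload covering overlapping DPA ranges fail
      * check 4; checks 3 and 4 are reported as CXL_MBOX_INVALID_EXTENT_LIST.
      */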
2963 
2964 static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
2965         const CXLUpdateDCExtentListInPl *in)
2966 {
2967     uint32_t i;
2968     CXLDCExtent *ent;
2969     CXLDCExtentGroup *ext_group;
2970     uint64_t dpa, len;
2971     Range range1, range2;
2972 
2973     for (i = 0; i < in->num_entries_updated; i++) {
2974         dpa = in->updated_entries[i].start_dpa;
2975         len = in->updated_entries[i].len;
2976 
2977         range_init_nofail(&range1, dpa, len);
2978 
2979         /*
2980          * The host-accepted DPA range must be contained by the first extent
2981          * group in the pending list
2982          */
2983         ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
2984         if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
2985             return CXL_MBOX_INVALID_PA;
2986         }
2987 
2988         /* to-be-added range should not overlap with range already accepted */
2989         QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
2990             range_init_nofail(&range2, ent->start_dpa, ent->len);
2991             if (range_overlaps_range(&range1, &range2)) {
2992                 return CXL_MBOX_INVALID_PA;
2993             }
2994         }
2995     }
2996     return CXL_MBOX_SUCCESS;
2997 }
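
     /*
      * Note (descriptive): the dry run above allows the host to accept only a
      * subset of what was offered, as long as every accepted range lies within
      * the first pending extent group and does not overlap an extent that has
      * already been accepted.
      */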
2998 
2999 /*
3000  * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
3001  * An extent is added to the extent list and becomes usable only after the
3002  * response is processed successfully.
3003  */
3004 static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
3005                                           uint8_t *payload_in,
3006                                           size_t len_in,
3007                                           uint8_t *payload_out,
3008                                           size_t *len_out,
3009                                           CXLCCI *cci)
3010 {
3011     CXLUpdateDCExtentListInPl *in = (void *)payload_in;
3012     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3013     CXLDCExtentList *extent_list = &ct3d->dc.extents;
3014     uint32_t i;
3015     uint64_t dpa, len;
3016     CXLRetCode ret;
3017 
3018     if (len_in < sizeof(*in)) {
3019         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3020     }
3021 
3022     if (in->num_entries_updated == 0) {
3023         cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
3024         return CXL_MBOX_SUCCESS;
3025     }
3026 
3027     if (len_in <
3028         sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
3029         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3030     }
3031 
3032     /* Reject requests that would exceed the device's extent tracking limit. */
3033     if (in->num_entries_updated + ct3d->dc.total_extent_count >
3034         CXL_NUM_EXTENTS_SUPPORTED) {
3035         return CXL_MBOX_RESOURCES_EXHAUSTED;
3036     }
3037 
3038     ret = cxl_detect_malformed_extent_list(ct3d, in);
3039     if (ret != CXL_MBOX_SUCCESS) {
3040         return ret;
3041     }
3042 
3043     ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
3044     if (ret != CXL_MBOX_SUCCESS) {
3045         return ret;
3046     }
3047 
3048     for (i = 0; i < in->num_entries_updated; i++) {
3049         dpa = in->updated_entries[i].start_dpa;
3050         len = in->updated_entries[i].len;
3051 
3052         cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
3053         ct3d->dc.total_extent_count += 1;
3054         ct3_set_region_block_backed(ct3d, dpa, len);
3055     }
3056     /* Remove the first extent group in the pending list */
3057     cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
3058 
3059     return CXL_MBOX_SUCCESS;
3060 }
3061 
3062 /*
3063  * Copy extent list from src to dst
3064  * Return value: number of extents copied
3065  */
3066 static uint32_t copy_extent_list(CXLDCExtentList *dst,
3067                                  const CXLDCExtentList *src)
3068 {
3069     uint32_t cnt = 0;
3070     CXLDCExtent *ent;
3071 
3072     if (!dst || !src) {
3073         return 0;
3074     }
3075 
3076     QTAILQ_FOREACH(ent, src, node) {
3077         cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
3078                                          ent->tag, ent->shared_seq);
3079         cnt++;
3080     }
3081     return cnt;
3082 }
3083 
3084 static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
3085         const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
3086         uint32_t *updated_list_size)
3087 {
3088     CXLDCExtent *ent, *ent_next;
3089     uint64_t dpa, len;
3090     uint32_t i;
3091     int cnt_delta = 0;
3092     CXLRetCode ret = CXL_MBOX_SUCCESS;
3093 
3094     QTAILQ_INIT(updated_list);
3095     copy_extent_list(updated_list, &ct3d->dc.extents);
3096 
3097     for (i = 0; i < in->num_entries_updated; i++) {
3098         Range range;
3099 
3100         dpa = in->updated_entries[i].start_dpa;
3101         len = in->updated_entries[i].len;
3102 
3103         /* Reject if the DPA range is not fully backed by valid extents */
3104         if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
3105             ret = CXL_MBOX_INVALID_PA;
3106             goto free_and_exit;
3107         }
3108 
3109         /* Past this point, extent overflow is the only error that can occur */
3110         while (len > 0) {
3111             QTAILQ_FOREACH(ent, updated_list, node) {
3112                 range_init_nofail(&range, ent->start_dpa, ent->len);
3113 
3114                 if (range_contains(&range, dpa)) {
3115                     uint64_t len1, len2 = 0, len_done = 0;
3116                     uint64_t ent_start_dpa = ent->start_dpa;
3117                     uint64_t ent_len = ent->len;
3118 
3119                     len1 = dpa - ent->start_dpa;
3120                     /* Found the extent or the subset of an existing extent */
3121                     if (range_contains(&range, dpa + len - 1)) {
3122                         len2 = ent_start_dpa + ent_len - dpa - len;
3123                     } else {
3124                         dpa = ent_start_dpa + ent_len;
3125                     }
3126                     len_done = ent_len - len1 - len2;
3127 
3128                     cxl_remove_extent_from_extent_list(updated_list, ent);
3129                     cnt_delta--;
3130 
3131                     if (len1) {
3132                         cxl_insert_extent_to_extent_list(updated_list,
3133                                                          ent_start_dpa,
3134                                                          len1, NULL, 0);
3135                         cnt_delta++;
3136                     }
3137                     if (len2) {
3138                         cxl_insert_extent_to_extent_list(updated_list,
3139                                                          dpa + len,
3140                                                          len2, NULL, 0);
3141                         cnt_delta++;
3142                     }
3143 
3144                     if (cnt_delta + ct3d->dc.total_extent_count >
3145                             CXL_NUM_EXTENTS_SUPPORTED) {
3146                         ret = CXL_MBOX_RESOURCES_EXHAUSTED;
3147                         goto free_and_exit;
3148                     }
3149 
3150                     len -= len_done;
3151                     break;
3152                 }
3153             }
3154         }
3155     }
3156 free_and_exit:
3157     if (ret != CXL_MBOX_SUCCESS) {
3158         QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
3159             cxl_remove_extent_from_extent_list(updated_list, ent);
3160         }
3161         *updated_list_size = 0;
3162     } else {
3163         *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
3164     }
3165 
3166     return ret;
3167 }
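
     /*
      * Illustrative example of the splitting above: if the accepted list holds
      * one extent covering [0, 3 * blk) and the host releases [blk, 2 * blk),
      * the original extent is removed and two remainders, [0, blk) and
      * [2 * blk, 3 * blk), are inserted, so cnt_delta for that entry ends up
      * at +1 and must still fit within CXL_NUM_EXTENTS_SUPPORTED.
      */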
3168 
3169 /*
3170  * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
3171  */
3172 static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
3173                                           uint8_t *payload_in,
3174                                           size_t len_in,
3175                                           uint8_t *payload_out,
3176                                           size_t *len_out,
3177                                           CXLCCI *cci)
3178 {
3179     CXLUpdateDCExtentListInPl *in = (void *)payload_in;
3180     CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3181     CXLDCExtentList updated_list;
3182     CXLDCExtent *ent, *ent_next;
3183     uint32_t updated_list_size;
3184     CXLRetCode ret;
3185 
3186     if (len_in < sizeof(*in)) {
3187         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3188     }
3189 
3190     if (in->num_entries_updated == 0) {
3191         return CXL_MBOX_INVALID_INPUT;
3192     }
3193 
3194     if (len_in <
3195         sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
3196         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3197     }
3198 
3199     ret = cxl_detect_malformed_extent_list(ct3d, in);
3200     if (ret != CXL_MBOX_SUCCESS) {
3201         return ret;
3202     }
3203 
3204     ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
3205                                         &updated_list_size);
3206     if (ret != CXL_MBOX_SUCCESS) {
3207         return ret;
3208     }
3209 
3210     /*
3211      * If the dry run release passes, updated_list is the new extent list.
3212      * Clear the extents in the currently accepted list, copy the extents
3213      * from updated_list into the accepted list, and update the total
3214      * extent count.
3215      */
3216     QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
3217         ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
3218         cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
3219     }
3220     copy_extent_list(&ct3d->dc.extents, &updated_list);
3221     QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
3222         ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
3223         cxl_remove_extent_from_extent_list(&updated_list, ent);
3224     }
3225     ct3d->dc.total_extent_count = updated_list_size;
3226 
3227     return CXL_MBOX_SUCCESS;
3228 }
3229 
3230 static const struct cxl_cmd cxl_cmd_set[256][256] = {
3231     [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
3232         cmd_infostat_bg_op_abort, 0, 0 },
3233     [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
3234         cmd_events_get_records, 1, 0 },
3235     [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
3236         cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
3237     [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
3238                                       cmd_events_get_interrupt_policy, 0, 0 },
3239     [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
3240                                       cmd_events_set_interrupt_policy,
3241                                       ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
3242     [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
3243         cmd_firmware_update_get_info, 0, 0 },
3244     [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
3245         cmd_firmware_update_transfer, ~0,
3246         CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
3247     [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
3248         cmd_firmware_update_activate, 2,
3249         CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT },
3250     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
3251     [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
3252                          8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3253     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
3254                               0, 0 },
3255     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3256     [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
3257                                   cmd_features_get_supported, 0x8, 0 },
3258     [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
3259                                 cmd_features_get_feature, 0x15, 0 },
3260     [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
3261                                 cmd_features_set_feature,
3262                                 ~0,
3263                                 (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
3264                                  CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3265                                  CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
3266                                  CXL_MBOX_IMMEDIATE_LOG_CHANGE |
3267                                  CXL_MBOX_SECURITY_STATE_CHANGE)},
3268     [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
3269         cmd_identify_memory_device, 0, 0 },
3270     [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
3271         cmd_ccls_get_partition_info, 0, 0 },
3272     [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
3273     [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
3274         ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3275     [HEALTH_INFO_ALERTS][GET_ALERT_CONFIG] = {
3276         "HEALTH_INFO_ALERTS_GET_ALERT_CONFIG",
3277         cmd_get_alert_config, 0, 0 },
3278     [HEALTH_INFO_ALERTS][SET_ALERT_CONFIG] = {
3279         "HEALTH_INFO_ALERTS_SET_ALERT_CONFIG",
3280         cmd_set_alert_config, 12, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3281     [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
3282         (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3283          CXL_MBOX_SECURITY_STATE_CHANGE |
3284          CXL_MBOX_BACKGROUND_OPERATION |
3285          CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
3286     [SANITIZE][MEDIA_OPERATIONS] = { "MEDIA_OPERATIONS", cmd_media_operations,
3287         ~0,
3288         (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
3289          CXL_MBOX_BACKGROUND_OPERATION)},
3290     [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
3291         cmd_get_security_state, 0, 0 },
3292     [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
3293         cmd_media_get_poison_list, 16, 0 },
3294     [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
3295         cmd_media_inject_poison, 8, 0 },
3296     [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
3297         cmd_media_clear_poison, 72, 0 },
3298     [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
3299         "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
3300         cmd_media_get_scan_media_capabilities, 16, 0 },
3301     [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
3302         cmd_media_scan_media, 17,
3303         (CXL_MBOX_BACKGROUND_OPERATION | CXL_MBOX_BACKGROUND_OPERATION_ABORT)},
3304     [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
3305         "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
3306         cmd_media_get_scan_media_results, 0, 0 },
3307 };
3308 
3309 static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
3310     [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
3311         cmd_dcd_get_dyn_cap_config, 2, 0 },
3312     [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
3313         "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
3314         8, 0 },
3315     [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
3316         "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
3317         ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3318     [DCD_CONFIG][RELEASE_DYN_CAP] = {
3319         "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
3320         ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
3321 };
3322 
3323 static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
3324     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
3325     [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
3326         cmd_infostat_bg_op_sts, 0, 0 },
3327     [INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
3328         cmd_infostat_bg_op_abort, 0, 0 },
3329     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
3330     [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
3331                          CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
3332     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
3333                               0 },
3334     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3335     [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
3336         cmd_identify_switch_device, 0, 0 },
3337     [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
3338         cmd_get_physical_port_state, ~0, 0 },
3339     [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
3340                                      cmd_tunnel_management_cmd, ~0, 0 },
3341 };
3342 
3343 /*
3344  * While the command is executing in the background, the device should
3345  * update the percentage complete in the Background Command Status Register
3346  * at least once per second.
3347  */
3348 
3349 #define CXL_MBOX_BG_UPDATE_FREQ 1000UL
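     /* 1000 ms, i.e. the once-per-second update cadence described above. */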
3350 
3351 int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
3352                             size_t len_in, uint8_t *pl_in, size_t *len_out,
3353                             uint8_t *pl_out, bool *bg_started)
3354 {
3355     int ret;
3356     const struct cxl_cmd *cxl_cmd;
3357     opcode_handler h;
3358     CXLDeviceState *cxl_dstate;
3359 
3360     *len_out = 0;
3361     cxl_cmd = &cci->cxl_cmd_set[set][cmd];
3362     h = cxl_cmd->handler;
3363     if (!h) {
3364         qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
3365                       set << 8 | cmd);
3366         return CXL_MBOX_UNSUPPORTED;
3367     }
3368 
3369     if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
3370         return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
3371     }
3372 
3373     /* Only one bg command at a time */
3374     if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
3375         cci->bg.runtime > 0) {
3376         return CXL_MBOX_BUSY;
3377     }
3378 
3379     /* Forbid selected commands while the media is disabled */
3380     if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
3381         cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
3382 
3383         if (cxl_dev_media_disabled(cxl_dstate)) {
3384             if (h == cmd_events_get_records ||
3385                 h == cmd_ccls_get_partition_info ||
3386                 h == cmd_ccls_set_lsa ||
3387                 h == cmd_ccls_get_lsa ||
3388                 h == cmd_logs_get_log ||
3389                 h == cmd_media_get_poison_list ||
3390                 h == cmd_media_inject_poison ||
3391                 h == cmd_media_clear_poison ||
3392                 h == cmd_sanitize_overwrite ||
3393                 h == cmd_firmware_update_transfer ||
3394                 h == cmd_firmware_update_activate) {
3395                 return CXL_MBOX_MEDIA_DISABLED;
3396             }
3397         }
3398     }
3399 
3400     ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
3401     if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
3402         ret == CXL_MBOX_BG_STARTED) {
3403         *bg_started = true;
3404     } else {
3405         *bg_started = false;
3406     }
3407 
3408     /* Set bg and the return code */
3409     if (*bg_started) {
3410         uint64_t now;
3411 
3412         cci->bg.opcode = (set << 8) | cmd;
3413 
3414         cci->bg.complete_pct = 0;
3415         cci->bg.aborted = false;
3416         cci->bg.ret_code = 0;
3417 
3418         now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
3419         cci->bg.starttime = now;
3420         timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
3421     }
3422 
3423     return ret;
3424 }
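
     /*
      * Flow sketch (descriptive): when a handler flagged with
      * CXL_MBOX_BACKGROUND_OPERATION returns CXL_MBOX_BG_STARTED,
      * cxl_process_cci_message() records the opcode and start time and arms
      * cci->bg.timer; bg_timercb() below then fires every
      * CXL_MBOX_BG_UPDATE_FREQ ms, updating the completion percentage until
      * the simulated runtime has elapsed.
      */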
3425 
3426 static void bg_timercb(void *opaque)
3427 {
3428     CXLCCI *cci = opaque;
3429     uint64_t now, total_time;
3430 
3431     qemu_mutex_lock(&cci->bg.lock);
3432 
3433     now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
3434     total_time = cci->bg.starttime + cci->bg.runtime;
3435 
3436     if (now >= total_time) { /* we are done */
3437         uint16_t ret = CXL_MBOX_SUCCESS;
3438 
3439         cci->bg.complete_pct = 100;
3440         cci->bg.ret_code = ret;
3441         switch (cci->bg.opcode) {
3442         case 0x0201: /* fw transfer */
3443             __do_firmware_xfer(cci);
3444             break;
3445         case 0x4400: /* sanitize */
3446         {
3447             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3448 
3449             __do_sanitization(ct3d);
3450             cxl_dev_enable_media(&ct3d->cxl_dstate);
3451         }
3452         break;
3453         case 0x4402: /* Media Operations sanitize */
3454         {
3455             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3456             __do_sanitize(ct3d);
3457         }
3458         break;
3459         case 0x4304: /* scan media */
3460         {
3461             CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3462 
3463             __do_scan_media(ct3d);
3464             break;
3465         }
3466         default:
3467             __builtin_unreachable();
3468             break;
3469         }
3470     } else {
3471         /* estimate only */
3472         cci->bg.complete_pct =
3473             100 * (now - cci->bg.starttime) / cci->bg.runtime;
3474         timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
3475     }
3476 
3477     if (cci->bg.complete_pct == 100) {
3478         /* TODO: generalize to switch CCI */
3479         CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
3480         CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
3481         PCIDevice *pdev = PCI_DEVICE(cci->d);
3482 
3483         cci->bg.starttime = 0;
3484         /* registers are updated, allow new bg-capable cmds */
3485         cci->bg.runtime = 0;
3486 
3487         if (msix_enabled(pdev)) {
3488             msix_notify(pdev, cxl_dstate->mbox_msi_n);
3489         } else if (msi_enabled(pdev)) {
3490             msi_notify(pdev, cxl_dstate->mbox_msi_n);
3491         }
3492     }
3493 
3494     qemu_mutex_unlock(&cci->bg.lock);
3495 }
3496 
3497 static void cxl_rebuild_cel(CXLCCI *cci)
3498 {
3499     cci->cel_size = 0; /* Reset for a fresh build */
3500     for (int set = 0; set < 256; set++) {
3501         for (int cmd = 0; cmd < 256; cmd++) {
3502             if (cci->cxl_cmd_set[set][cmd].handler) {
3503                 const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
3504                 struct cel_log *log =
3505                     &cci->cel_log[cci->cel_size];
3506 
3507                 log->opcode = (set << 8) | cmd;
3508                 log->effect = c->effect;
3509                 cci->cel_size++;
3510             }
3511         }
3512     }
3513 }
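
     /*
      * Note (descriptive): the CEL is rebuilt from whichever handlers are
      * currently registered, so cxl_add_cci_commands() below keeps the log in
      * sync after extra command sets are merged in.
      */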
3514 
3515 void cxl_init_cci(CXLCCI *cci, size_t payload_max)
3516 {
3517     cci->payload_max = payload_max;
3518     cxl_rebuild_cel(cci);
3519 
3520     cci->bg.complete_pct = 0;
3521     cci->bg.starttime = 0;
3522     cci->bg.runtime = 0;
3523     cci->bg.aborted = false;
3524     cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3525                                  bg_timercb, cci);
3526     qemu_mutex_init(&cci->bg.lock);
3527 
3528     memset(&cci->fw, 0, sizeof(cci->fw));
3529     cci->fw.active_slot = 1;
3530     cci->fw.slot[cci->fw.active_slot - 1] = true;
3531     cci->initialized = true;
3532 }
3533 
3534 void cxl_destroy_cci(CXLCCI *cci)
3535 {
3536     qemu_mutex_destroy(&cci->bg.lock);
3537     cci->initialized = false;
3538 }
3539 
3540 static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
3541 {
3542     for (int set = 0; set < 256; set++) {
3543         for (int cmd = 0; cmd < 256; cmd++) {
3544             if (cxl_cmds[set][cmd].handler) {
3545                 cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
3546             }
3547         }
3548     }
3549 }
3550 
3551 void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
3552                                  size_t payload_max)
3553 {
3554     cci->payload_max = MAX(payload_max, cci->payload_max);
3555     cxl_copy_cci_commands(cci, cxl_cmd_set);
3556     cxl_rebuild_cel(cci);
3557 }
3558 
3559 void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
3560                                   DeviceState *d, size_t payload_max)
3561 {
3562     cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
3563     cci->d = d;
3564     cci->intf = intf;
3565     cxl_init_cci(cci, payload_max);
3566 }
3567 
3568 void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
3569 {
3570     CXLType3Dev *ct3d = CXL_TYPE3(d);
3571 
3572     cxl_copy_cci_commands(cci, cxl_cmd_set);
3573     if (ct3d->dc.num_regions) {
3574         cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
3575     }
3576     cci->d = d;
3577 
3578     /* No separate interface for the PCI mailbox; handled in the PCI device */
3579     cci->intf = d;
3580     cxl_init_cci(cci, payload_max);
3581 }
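
     /*
      * Note (descriptive): the DCD command set above is only registered when
      * the type-3 device is configured with at least one dynamic capacity
      * region; otherwise the DCD opcodes remain unsupported.
      */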
3582 
3583 static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
3584     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
3585     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
3586                               0 },
3587     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3588 };
3589 
3590 void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
3591                                size_t payload_max)
3592 {
3593     cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
3594     cci->d = d;
3595     cci->intf = intf;
3596     cxl_init_cci(cci, payload_max);
3597 }
3598 
3599 static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
3600     [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0,  0},
3601     [INFOSTAT][GET_RESPONSE_MSG_LIMIT] = { "GET_RESPONSE_MSG_LIMIT",
3602                                            cmd_get_response_msg_limit, 0, 0 },
3603     [INFOSTAT][SET_RESPONSE_MSG_LIMIT] = { "SET_RESPONSE_MSG_LIMIT",
3604                                            cmd_set_response_msg_limit, 1, 0 },
3605     [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
3606                               0 },
3607     [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
3608     [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
3609     [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
3610                                      cmd_tunnel_management_cmd, ~0, 0 },
3611 };
3612 
3613 void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
3614                                            DeviceState *intf,
3615                                            size_t payload_max)
3616 {
3617     cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
3618     cci->d = d;
3619     cci->intf = intf;
3620     cxl_init_cci(cci, payload_max);
3621 }
3622