xref: /qemu/hw/cxl/cxl-device-utils.c (revision 2710d49a7c8b9b117a46847c7ace5eb21d48e882)
1 /*
2  * CXL Utility library for devices
3  *
4  * Copyright(C) 2020 Intel Corporation.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2. See the
7  * COPYING file in the top-level directory.
8  */
9 
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "hw/cxl/cxl.h"
13 
14 /*
15  * Device registers have no restrictions per the spec, and so fall back to the
16  * default memory mapped register rules in 8.2:
17  *   Software shall use CXL.io Memory Read and Write to access memory mapped
18  *   register defined in this section. Unless otherwise specified, software
19  *   shall restrict the accesses width based on the following:
20  *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
21  *     quantity.
22  *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
23  *     Bytes
24  *   • The address shall be a multiple of the access width, e.g. when
25  *     accessing a register as a 4 Byte quantity, the address shall be
26  *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
29  */
30 
31 static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
32 {
33     CXLDeviceState *cxl_dstate = opaque;
34 
35     switch (size) {
36     case 4:
37         return cxl_dstate->caps_reg_state32[offset / size];
38     case 8:
39         return cxl_dstate->caps_reg_state64[offset / size];
40     default:
41         g_assert_not_reached();
42     }
43 }
44 
45 static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
46 {
47     CXLDeviceState *cxl_dstate = opaque;
48 
49     switch (size) {
50     case 1:
51         return cxl_dstate->dev_reg_state[offset];
52     case 2:
53         return cxl_dstate->dev_reg_state16[offset / size];
54     case 4:
55         return cxl_dstate->dev_reg_state32[offset / size];
56     case 8:
57         return cxl_dstate->dev_reg_state64[offset / size];
58     default:
59         g_assert_not_reached();
60     }
61 }
62 
63 static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
64 {
65     CXLDeviceState *cxl_dstate;
66     CXLCCI *cci = opaque;
67 
68     if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
69         cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
70     } else {
71         return 0;
72     }
73 
74     switch (size) {
75     case 1:
76         return cxl_dstate->mbox_reg_state[offset];
77     case 2:
78         return cxl_dstate->mbox_reg_state16[offset / size];
79     case 4:
80         return cxl_dstate->mbox_reg_state32[offset / size];
81     case 8:
82         return cxl_dstate->mbox_reg_state64[offset / size];
83     default:
84         g_assert_not_reached();
85     }
86 }
87 
88 static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
89                                uint64_t value)
90 {
91     switch (offset) {
92     case A_CXL_DEV_MAILBOX_CTRL:
93         /* fallthrough */
94     case A_CXL_DEV_MAILBOX_CAP:
95         /* RO register */
96         break;
97     default:
98         qemu_log_mask(LOG_UNIMP,
99                       "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
100                       __func__, offset);
101         return;
102     }
103 
104     reg_state[offset / sizeof(*reg_state)] = value;
105 }
106 
107 static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
108                                uint64_t value)
109 {
110     switch (offset) {
111     case A_CXL_DEV_MAILBOX_CMD:
112         break;
113     case A_CXL_DEV_BG_CMD_STS:
114         /* BG not supported */
115         /* fallthrough */
116     case A_CXL_DEV_MAILBOX_STS:
117         /* Read only register, will get updated by the state machine */
118         return;
119     default:
120         qemu_log_mask(LOG_UNIMP,
121                       "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
122                       __func__, offset);
123         return;
124     }
125 
126 
127     reg_state[offset / sizeof(*reg_state)] = value;
128 }
129 
/*
 * Write handler for the mailbox register block. Stores the write into the
 * register state, then, if the doorbell is set, decodes and executes the
 * pending command and publishes the result. Statement order matters here:
 * the doorbell must only be cleared after the command/status registers
 * have been updated.
 */
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    /* Register state is only backed for type 3 devices; otherwise drop */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    /* Payload area: raw byte copy at any offset/size, no decode needed */
    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    /* Control registers: route to the width-specific decoder */
    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    /* Doorbell set: a command is ready to execute */
    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        /* g_memdup2 returns NULL for len_in == 0 as well as on OOM */
        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data  - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}
205 
206 static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
207 {
208     uint64_t retval = 0;
209 
210     retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
211     retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);
212 
213     return retval;
214 }
215 
/*
 * Shared write handler for read-only register blocks: silently discards
 * the write (the caps, device status, and memory device regions all use
 * this).
 */
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    /* Many register sets are read only */
}
221 
/*
 * Memory device status registers: guest may issue 1-8 byte aligned
 * accesses, but the implementation only handles full 8-byte reads
 * (QEMU's memory core splits/combines the rest).
 */
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
236 
/*
 * Mailbox registers: handlers accept every access width natively so the
 * payload area can be filled with arbitrary-sized writes.
 */
static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
251 
/* Device status registers: read-only, any access width handled directly. */
static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
266 
/*
 * Capability array registers: read-only; implementation only has 32/64-bit
 * backing views, so sub-word guest accesses are synthesized by the memory
 * core from 4-byte reads.
 */
static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
281 
/*
 * Build the CXL device register block: a container region (BAR-sized,
 * rounded to a power of two for PCI) holding the capability array,
 * device status, mailbox, and memory device sub-regions at their
 * architected offsets. The mailbox region is bound to the CCI; the
 * other regions are bound to the device state.
 */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    /* Capability array sits at offset 0 of the block */
    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
311 
312 void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
313                           bool available)
314 {
315     if (available) {
316         cxl_dstate->event_status |= (1 << log_type);
317     } else {
318         cxl_dstate->event_status &= ~(1 << log_type);
319     }
320 
321     ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
322                      EVENT_STATUS, cxl_dstate->event_status);
323 }
324 
325 static void device_reg_init_common(CXLDeviceState *cxl_dstate)
326 {
327     CXLEventLogType log;
328 
329     for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
330         cxl_event_set_status(cxl_dstate, log, false);
331     }
332 }
333 
/*
 * Initialize the mailbox capability register: advertise the payload size
 * (encoded as a power-of-two shift) with no interrupt or background
 * command support, and cache the byte size for the command handlers.
 */
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}
341 
/* Memory device status is synthesized at read time (mdev_reg_read), so no
 * register state needs initializing here beyond the cap header set up by
 * the caller. */
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
343 
/*
 * Populate the register state for a type 3 device: write the capability
 * array header, then each capability header plus its register defaults
 * (device status, mailbox, memory device), and finally initialize the
 * mailbox command interface.
 */
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    /* Three capabilities follow: DEVICE_STATUS, MAILBOX, MEMORY_DEVICE */
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}
367 
368 uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
369 {
370     uint64_t time, delta;
371     uint64_t final_time = 0;
372 
373     if (cxl_dstate->timestamp.set) {
374         /* Find the delta from the last time the host set the time. */
375         time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
376         delta = time - cxl_dstate->timestamp.last_set;
377         final_time = cxl_dstate->timestamp.host_set + delta;
378     }
379 
380     return final_time;
381 }
382