/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   register defined in this section. Unless otherwise specified, software
 *   shall restrict the accesses width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

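/*
 * The MemoryRegionOps definitions towards the end of this file encode these
 * rules: .valid constrains what the guest may issue (1 to 8 byte, naturally
 * aligned accesses) while .impl describes the access sizes the handlers
 * below actually implement; the memory core splits or widens accesses as
 * needed to bridge the two.
 */
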
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

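/*
 * The mailbox is owned by a CCI, which may sit on a Type 3 device or on a
 * switch mailbox CCI. Resolve the backing register state from whichever
 * device owns the CCI; 64-bit reads of the background command status and
 * mailbox status registers are refreshed from live CCI state before being
 * returned.
 */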
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            int bgop;

            qemu_mutex_lock(&cci->bg.lock);
            bgop = !(cci->bg.complete_pct == 100 || cci->bg.aborted);

            status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                    bgop);
            cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            qemu_mutex_unlock(&cci->bg.lock);
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

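/*
 * Stores to recognized 32-bit mailbox register offsets are written straight
 * through to the backing state; anything else is logged as unimplemented and
 * the write is ignored.
 */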
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

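/*
 * Writes to the payload area are copied directly into backing store; control
 * register writes are dispatched by access size. Setting the doorbell bit
 * triggers synchronous processing of the mailbox command, after which the
 * doorbell is cleared to signal completion to the host.
 */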
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}

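/*
 * Only the memory device status register is implemented in this block, so
 * the offset is not consulted.
 */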
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

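/*
 * Guests may issue 1 to 8 byte accesses (.valid), but mdev_reg_read only
 * implements 8 byte reads (.impl); the memory core widens narrower accesses
 * and extracts the requested bytes.
 */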
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

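/*
 * Build the device register block: a container region rounded up to a power
 * of two (it is exposed as a PCI BAR), with the capability array, device
 * status, mailbox and memory device regions mapped at their defined offsets.
 */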
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

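/*
 * Track per-log "records available" state and mirror it into the Event
 * Status register in the device status register block.
 */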
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

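/* On reset, no event log has records available */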
static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

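/*
 * Set up the Mailbox Capabilities register: the payload size field is
 * log2-encoded, background command completion interrupts are supported,
 * and the interrupt message number comes from the caller.
 */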
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate, int msi_n)
{
    /* 2048 payload size */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}

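/* Report the media as ready and the mailbox interface as available */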
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}

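/*
 * Initialize the Type 3 device register block: a three-entry capability
 * array (Device Status, Primary Mailbox, Memory Device Status), the
 * per-capability register state, and the device's mailbox CCI.
 */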
void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

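/*
 * Switch mailbox CCI variant of the register block init: the same three
 * capabilities as the Type 3 device, but with fixed capability versions.
 */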
void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

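/*
 * Returns 0 until the host has set the timestamp; after that, the value
 * advances from the host-supplied time by the virtual-clock delta since it
 * was last set.
 */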
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}