/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or
 *     8 Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}
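
/*
 * The 16/32/64-bit *_reg_state arrays used above are backed by the same
 * storage as the byte-wide array (they are declared as unioned views in
 * cxl_device.h), so indexing by offset / size picks out the naturally
 * aligned element for each access width. The .impl bounds on the
 * MemoryRegionOps declared below guarantee that only the sizes handled in
 * these switch statements ever reach the read handlers.
 */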

static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}
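
/*
 * Mailbox doorbell protocol (CXL r3.1 Section 8.2.8.4). The write handler
 * below runs the whole command cycle synchronously inside the doorbell
 * write. An illustrative sketch of the sequence a host side driver is
 * expected to follow (not code from this file):
 *
 *     write the input payload to A_CXL_DEV_CMD_PAYLOAD
 *     write opcode and input length to A_CXL_DEV_MAILBOX_CMD
 *     set DOORBELL in A_CXL_DEV_MAILBOX_CTRL
 *     poll A_CXL_DEV_MAILBOX_CTRL until DOORBELL reads clear
 *     read the return code from A_CXL_DEV_MAILBOX_STS
 *     read the output length from A_CXL_DEV_MAILBOX_CMD and the output
 *         payload from A_CXL_DEV_CMD_PAYLOAD
 *
 * Because the command completes (or a background operation is started)
 * before the doorbell write returns, a polling guest should never observe
 * DOORBELL set.
 */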

static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}
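
/*
 * The Memory Device Status register set is a single 64-bit read-only
 * register, so the read handler below can ignore offset and size. The
 * .impl bounds in mdev_ops force 8-byte accesses at this level; the
 * narrower reads permitted by .valid are synthesized from the 8-byte
 * result by the memory core.
 */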

static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate, int msi_n)
{
    /* 2048 byte payload size, encoded as log2 in the capability field */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}
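
/*
 * The numeric arguments passed to cxl_device_cap_init() below (1, 2 and
 * 0x4000) are the CXL-defined capability IDs for Device Status, the
 * Primary Mailbox and Memory Device Status respectively (CXL r3.1
 * Section 8.2.8.2.1).
 */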

void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}