/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   register defined in this section. Unless otherwise specified, software
 *   shall restrict the accesses width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined
 */

/*
 * Read from the capability array region.  Only 4- and 8-byte accesses reach
 * here: caps_ops.impl below sets min_access_size = 4, so the memory core
 * splits/combines all other guest access widths before calling us.
 */
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

/*
 * Read from the device-status register region.  All natural access widths
 * (1/2/4/8) are implemented directly against the aliased backing store.
 */
static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

/*
 * Read from the mailbox register region.  The opaque pointer is the CCI, not
 * the device state: resolve the owning device (type-3 memory device or switch
 * mailbox CCI) to find its CXLDeviceState.  Reads from a CCI attached to any
 * other owner type return 0.
 */
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

/*
 * Handle a 32-bit write to the mailbox register region (below the payload
 * area).  Writes to unrecognized offsets are logged and ignored.
 *
 * NOTE(review): both CTRL and CAP fall through to the store below, even
 * though CAP is commented as read-only — so a guest write does land in the
 * CAP backing state.  Confirm whether the CAP case was meant to 'return'
 * instead of 'break'.
 */
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

/*
 * Handle a 64-bit write to the mailbox register region.  Only the command
 * register is writable; BG_CMD_STS (background ops unsupported) and the
 * status register (owned by the mailbox state machine) are silently dropped,
 * and unrecognized offsets are logged and ignored.
 */
static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        /* BG not supported */
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }


    reg_state[offset / sizeof(*reg_state)] = value;
}

/*
 * Mailbox write handler.  Payload-area writes are stored directly; control
 * register writes are dispatched by size.  If, after the write, the doorbell
 * bit is set, decode the command register, execute the command synchronously
 * via cxl_process_cci_message(), write back the status/command registers,
 * and finally clear the doorbell to signal completion to the host.
 */
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    /* Resolve device state from the CCI owner; ignore writes otherwise. */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    /* Payload area: plain byte storage, no per-register semantics. */
    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        /* g_memdup2() returns NULL both for len_in == 0 and on OOM. */
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}

/*
 * Memory device status register: always reports media ready and mailbox
 * ready.  offset/size are ignored; mdev_ops.impl forces 8-byte accesses.
 */
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    uint64_t retval = 0;

    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);

    return retval;
}

/* Shared write handler that discards all writes. */
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        /* Narrower guest accesses are synthesized from 8-byte ones. */
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        /* caps_reg_read() only handles 4- and 8-byte accesses. */
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

/*
 * Build the device register block: a container region (sized to a power of
 * two, as required for a PCI BAR) holding the capability array, device
 * status, mailbox, and memory-device subregions at their spec-defined
 * offsets.  The mailbox subregion's opaque is the CCI; the others use the
 * device state directly.
 */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

/*
 * Set or clear one event-log bit in the cached event status, then mirror the
 * whole bitmap into the Event Status device register.
 */
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

/* Initialize the device-status registers: mark every event log empty. */
static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

/* Initialize the mailbox capability register and cached payload size. */
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}

/* Memory-device registers need no initialization (read handler is static). */
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }

/*
 * Populate the capability array and per-capability headers for a type-3
 * memory device (device status v2, mailbox v1, memory-device v1), then
 * initialize each register set and the mailbox CCI command set.
 */
void cxl_device_register_init_t3(CXLType3Dev *ct3d)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

/*
 * Same capability layout for a switch mailbox CCI device; unlike the type-3
 * path, mailbox CCI initialization is left to the caller.
 */
void cxl_device_register_init_swcci(CSWMBCCIDev *sw)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

/*
 * Return the device timestamp: the host-set base plus virtual-clock time
 * elapsed since it was set, or 0 if the host has never set the timestamp.
 */
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}