1 /* 2 * QEMU VMWARE PVSCSI paravirtual SCSI bus 3 * 4 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com) 5 * 6 * Developed by Daynix Computing LTD (http://www.daynix.com) 7 * 8 * Based on implementation by Paolo Bonzini 9 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html 10 * 11 * Authors: 12 * Paolo Bonzini <pbonzini@redhat.com> 13 * Dmitry Fleytman <dmitry@daynix.com> 14 * Yan Vugenfirer <yan@daynix.com> 15 * 16 * This work is licensed under the terms of the GNU GPL, version 2. 17 * See the COPYING file in the top-level directory. 18 * 19 * NOTE about MSI-X: 20 * MSI-X support has been removed for the moment because it leads Windows OS 21 * to crash on startup. The crash happens because Windows driver requires 22 * MSI-X shared memory to be part of the same BAR used for rings state 23 * registers, etc. This is not supported by QEMU infrastructure so separate 24 * BAR created from MSI-X purposes. Windows driver fails to deal with 2 BARs. 25 * 26 */ 27 28 #include "hw/scsi/scsi.h" 29 #include <block/scsi.h> 30 #include "hw/pci/msi.h" 31 #include "vmw_pvscsi.h" 32 #include "trace.h" 33 34 35 #define PVSCSI_USE_64BIT (true) 36 #define PVSCSI_PER_VECTOR_MASK (false) 37 38 #define PVSCSI_MAX_DEVS (64) 39 #define PVSCSI_MSIX_NUM_VECTORS (1) 40 41 #define PVSCSI_MAX_CMD_DATA_WORDS \ 42 (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t)) 43 44 #define RS_GET_FIELD(m, field) \ 45 (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ 46 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field))) 47 #define RS_SET_FIELD(m, field, val) \ 48 (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ 49 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val)) 50 51 #define TYPE_PVSCSI "pvscsi" 52 #define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI) 53 54 /* Compatability flags for migration */ 55 #define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0 56 #define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \ 57 (1 << 
PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT) 58 59 #define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \ 60 ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION) 61 #define PVSCSI_MSI_OFFSET(s) \ 62 (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c) 63 64 typedef struct PVSCSIRingInfo { 65 uint64_t rs_pa; 66 uint32_t txr_len_mask; 67 uint32_t rxr_len_mask; 68 uint32_t msg_len_mask; 69 uint64_t req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; 70 uint64_t cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES]; 71 uint64_t msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES]; 72 uint64_t consumed_ptr; 73 uint64_t filled_cmp_ptr; 74 uint64_t filled_msg_ptr; 75 } PVSCSIRingInfo; 76 77 typedef struct PVSCSISGState { 78 hwaddr elemAddr; 79 hwaddr dataAddr; 80 uint32_t resid; 81 } PVSCSISGState; 82 83 typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList; 84 85 typedef struct { 86 PCIDevice parent_obj; 87 MemoryRegion io_space; 88 SCSIBus bus; 89 QEMUBH *completion_worker; 90 PVSCSIRequestList pending_queue; 91 PVSCSIRequestList completion_queue; 92 93 uint64_t reg_interrupt_status; /* Interrupt status register value */ 94 uint64_t reg_interrupt_enabled; /* Interrupt mask register value */ 95 uint64_t reg_command_status; /* Command status register value */ 96 97 /* Command data adoption mechanism */ 98 uint64_t curr_cmd; /* Last command arrived */ 99 uint32_t curr_cmd_data_cntr; /* Amount of data for last command */ 100 101 /* Collector for current command data */ 102 uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS]; 103 104 uint8_t rings_info_valid; /* Whether data rings initialized */ 105 uint8_t msg_ring_info_valid; /* Whether message ring initialized */ 106 uint8_t use_msg; /* Whether to use message ring */ 107 108 uint8_t msi_used; /* Whether MSI support was installed successfully */ 109 110 PVSCSIRingInfo rings; /* Data transfer rings manager */ 111 uint32_t resetting; /* Reset in progress */ 112 113 uint32_t compat_flags; 114 } PVSCSIState; 115 116 typedef struct 
PVSCSIRequest { 117 SCSIRequest *sreq; 118 PVSCSIState *dev; 119 uint8_t sense_key; 120 uint8_t completed; 121 int lun; 122 QEMUSGList sgl; 123 PVSCSISGState sg; 124 struct PVSCSIRingReqDesc req; 125 struct PVSCSIRingCmpDesc cmp; 126 QTAILQ_ENTRY(PVSCSIRequest) next; 127 } PVSCSIRequest; 128 129 /* Integer binary logarithm */ 130 static int 131 pvscsi_log2(uint32_t input) 132 { 133 int log = 0; 134 assert(input > 0); 135 while (input >> ++log) { 136 } 137 return log; 138 } 139 140 static void 141 pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri) 142 { 143 int i; 144 uint32_t txr_len_log2, rxr_len_log2; 145 uint32_t req_ring_size, cmp_ring_size; 146 m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT; 147 148 req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; 149 cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE; 150 txr_len_log2 = pvscsi_log2(req_ring_size - 1); 151 rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1); 152 153 m->txr_len_mask = MASK(txr_len_log2); 154 m->rxr_len_mask = MASK(rxr_len_log2); 155 156 m->consumed_ptr = 0; 157 m->filled_cmp_ptr = 0; 158 159 for (i = 0; i < ri->reqRingNumPages; i++) { 160 m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT; 161 } 162 163 for (i = 0; i < ri->cmpRingNumPages; i++) { 164 m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT; 165 } 166 167 RS_SET_FIELD(m, reqProdIdx, 0); 168 RS_SET_FIELD(m, reqConsIdx, 0); 169 RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2); 170 171 RS_SET_FIELD(m, cmpProdIdx, 0); 172 RS_SET_FIELD(m, cmpConsIdx, 0); 173 RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2); 174 175 trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2); 176 177 /* Flush ring state page changes */ 178 smp_wmb(); 179 } 180 181 static void 182 pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri) 183 { 184 int i; 185 uint32_t len_log2; 186 uint32_t ring_size; 187 188 ring_size = ri->numPages * 
PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE; 189 len_log2 = pvscsi_log2(ring_size - 1); 190 191 m->msg_len_mask = MASK(len_log2); 192 193 m->filled_msg_ptr = 0; 194 195 for (i = 0; i < ri->numPages; i++) { 196 m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT; 197 } 198 199 RS_SET_FIELD(m, msgProdIdx, 0); 200 RS_SET_FIELD(m, msgConsIdx, 0); 201 RS_SET_FIELD(m, msgNumEntriesLog2, len_log2); 202 203 trace_pvscsi_ring_init_msg(len_log2); 204 205 /* Flush ring state page changes */ 206 smp_wmb(); 207 } 208 209 static void 210 pvscsi_ring_cleanup(PVSCSIRingInfo *mgr) 211 { 212 mgr->rs_pa = 0; 213 mgr->txr_len_mask = 0; 214 mgr->rxr_len_mask = 0; 215 mgr->msg_len_mask = 0; 216 mgr->consumed_ptr = 0; 217 mgr->filled_cmp_ptr = 0; 218 mgr->filled_msg_ptr = 0; 219 memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa)); 220 memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa)); 221 memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa)); 222 } 223 224 static hwaddr 225 pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr) 226 { 227 uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx); 228 229 if (ready_ptr != mgr->consumed_ptr) { 230 uint32_t next_ready_ptr = 231 mgr->consumed_ptr++ & mgr->txr_len_mask; 232 uint32_t next_ready_page = 233 next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; 234 uint32_t inpage_idx = 235 next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; 236 237 return mgr->req_ring_pages_pa[next_ready_page] + 238 inpage_idx * sizeof(PVSCSIRingReqDesc); 239 } else { 240 return 0; 241 } 242 } 243 244 static void 245 pvscsi_ring_flush_req(PVSCSIRingInfo *mgr) 246 { 247 RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr); 248 } 249 250 static hwaddr 251 pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr) 252 { 253 /* 254 * According to Linux driver code it explicitly verifies that number 255 * of requests being processed by device is less then the size of 256 * completion queue, so device may omit completion queue overflow 257 * conditions 
check. We assume that this is true for other (Windows) 258 * drivers as well. 259 */ 260 261 uint32_t free_cmp_ptr = 262 mgr->filled_cmp_ptr++ & mgr->rxr_len_mask; 263 uint32_t free_cmp_page = 264 free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE; 265 uint32_t inpage_idx = 266 free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE; 267 return mgr->cmp_ring_pages_pa[free_cmp_page] + 268 inpage_idx * sizeof(PVSCSIRingCmpDesc); 269 } 270 271 static hwaddr 272 pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr) 273 { 274 uint32_t free_msg_ptr = 275 mgr->filled_msg_ptr++ & mgr->msg_len_mask; 276 uint32_t free_msg_page = 277 free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE; 278 uint32_t inpage_idx = 279 free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE; 280 return mgr->msg_ring_pages_pa[free_msg_page] + 281 inpage_idx * sizeof(PVSCSIRingMsgDesc); 282 } 283 284 static void 285 pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr) 286 { 287 /* Flush descriptor changes */ 288 smp_wmb(); 289 290 trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr); 291 292 RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr); 293 } 294 295 static bool 296 pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr) 297 { 298 uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx); 299 uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx); 300 301 return (prodIdx - consIdx) < (mgr->msg_len_mask + 1); 302 } 303 304 static void 305 pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr) 306 { 307 /* Flush descriptor changes */ 308 smp_wmb(); 309 310 trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr); 311 312 RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr); 313 } 314 315 static void 316 pvscsi_reset_state(PVSCSIState *s) 317 { 318 s->curr_cmd = PVSCSI_CMD_FIRST; 319 s->curr_cmd_data_cntr = 0; 320 s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 321 s->reg_interrupt_status = 0; 322 pvscsi_ring_cleanup(&s->rings); 323 s->rings_info_valid = FALSE; 324 s->msg_ring_info_valid = FALSE; 325 QTAILQ_INIT(&s->pending_queue); 326 
QTAILQ_INIT(&s->completion_queue); 327 } 328 329 static void 330 pvscsi_update_irq_status(PVSCSIState *s) 331 { 332 PCIDevice *d = PCI_DEVICE(s); 333 bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status; 334 335 trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled, 336 s->reg_interrupt_status); 337 338 if (s->msi_used && msi_enabled(d)) { 339 if (should_raise) { 340 trace_pvscsi_update_irq_msi(); 341 msi_notify(d, PVSCSI_VECTOR_COMPLETION); 342 } 343 return; 344 } 345 346 pci_set_irq(d, !!should_raise); 347 } 348 349 static void 350 pvscsi_raise_completion_interrupt(PVSCSIState *s) 351 { 352 s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0; 353 354 /* Memory barrier to flush interrupt status register changes*/ 355 smp_wmb(); 356 357 pvscsi_update_irq_status(s); 358 } 359 360 static void 361 pvscsi_raise_message_interrupt(PVSCSIState *s) 362 { 363 s->reg_interrupt_status |= PVSCSI_INTR_MSG_0; 364 365 /* Memory barrier to flush interrupt status register changes*/ 366 smp_wmb(); 367 368 pvscsi_update_irq_status(s); 369 } 370 371 static void 372 pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc) 373 { 374 hwaddr cmp_descr_pa; 375 376 cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings); 377 trace_pvscsi_cmp_ring_put(cmp_descr_pa); 378 cpu_physical_memory_write(cmp_descr_pa, (void *)cmp_desc, 379 sizeof(*cmp_desc)); 380 } 381 382 static void 383 pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc) 384 { 385 hwaddr msg_descr_pa; 386 387 msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings); 388 trace_pvscsi_msg_ring_put(msg_descr_pa); 389 cpu_physical_memory_write(msg_descr_pa, (void *)msg_desc, 390 sizeof(*msg_desc)); 391 } 392 393 static void 394 pvscsi_process_completion_queue(void *opaque) 395 { 396 PVSCSIState *s = opaque; 397 PVSCSIRequest *pvscsi_req; 398 bool has_completed = false; 399 400 while (!QTAILQ_EMPTY(&s->completion_queue)) { 401 pvscsi_req = QTAILQ_FIRST(&s->completion_queue); 402 
QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next); 403 pvscsi_cmp_ring_put(s, &pvscsi_req->cmp); 404 g_free(pvscsi_req); 405 has_completed = true; 406 } 407 408 if (has_completed) { 409 pvscsi_ring_flush_cmp(&s->rings); 410 pvscsi_raise_completion_interrupt(s); 411 } 412 } 413 414 static void 415 pvscsi_reset_adapter(PVSCSIState *s) 416 { 417 s->resetting++; 418 qbus_reset_all_fn(&s->bus); 419 s->resetting--; 420 pvscsi_process_completion_queue(s); 421 assert(QTAILQ_EMPTY(&s->pending_queue)); 422 pvscsi_reset_state(s); 423 } 424 425 static void 426 pvscsi_schedule_completion_processing(PVSCSIState *s) 427 { 428 /* Try putting more complete requests on the ring. */ 429 if (!QTAILQ_EMPTY(&s->completion_queue)) { 430 qemu_bh_schedule(s->completion_worker); 431 } 432 } 433 434 static void 435 pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r) 436 { 437 assert(!r->completed); 438 439 trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen, 440 r->sense_key); 441 if (r->sreq != NULL) { 442 scsi_req_unref(r->sreq); 443 r->sreq = NULL; 444 } 445 r->completed = 1; 446 QTAILQ_REMOVE(&s->pending_queue, r, next); 447 QTAILQ_INSERT_TAIL(&s->completion_queue, r, next); 448 pvscsi_schedule_completion_processing(s); 449 } 450 451 static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r) 452 { 453 PVSCSIRequest *req = r->hba_private; 454 455 trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size); 456 457 return &req->sgl; 458 } 459 460 static void 461 pvscsi_get_next_sg_elem(PVSCSISGState *sg) 462 { 463 struct PVSCSISGElement elem; 464 465 cpu_physical_memory_read(sg->elemAddr, (void *)&elem, sizeof(elem)); 466 if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) { 467 /* 468 * There is PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in 469 * header file but its value is unknown. 
This flag requires 470 * additional processing, so we put warning here to catch it 471 * some day and make proper implementation 472 */ 473 trace_pvscsi_get_next_sg_elem(elem.flags); 474 } 475 476 sg->elemAddr += sizeof(elem); 477 sg->dataAddr = elem.addr; 478 sg->resid = elem.length; 479 } 480 481 static void 482 pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len) 483 { 484 r->cmp.senseLen = MIN(r->req.senseLen, len); 485 r->sense_key = sense[(sense[0] & 2) ? 1 : 2]; 486 cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen); 487 } 488 489 static void 490 pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid) 491 { 492 PVSCSIRequest *pvscsi_req = req->hba_private; 493 PVSCSIState *s; 494 495 if (!pvscsi_req) { 496 trace_pvscsi_command_complete_not_found(req->tag); 497 return; 498 } 499 s = pvscsi_req->dev; 500 501 if (resid) { 502 /* Short transfer. */ 503 trace_pvscsi_command_complete_data_run(); 504 pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN; 505 } 506 507 pvscsi_req->cmp.scsiStatus = status; 508 if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) { 509 uint8_t sense[SCSI_SENSE_BUF_SIZE]; 510 int sense_len = 511 scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense)); 512 513 trace_pvscsi_command_complete_sense_len(sense_len); 514 pvscsi_write_sense(pvscsi_req, sense, sense_len); 515 } 516 qemu_sglist_destroy(&pvscsi_req->sgl); 517 pvscsi_complete_request(s, pvscsi_req); 518 } 519 520 static void 521 pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type) 522 { 523 if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) { 524 PVSCSIMsgDescDevStatusChanged msg = {0}; 525 526 msg.type = msg_type; 527 msg.bus = dev->channel; 528 msg.target = dev->id; 529 msg.lun[1] = dev->lun; 530 531 pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg); 532 pvscsi_ring_flush_msg(&s->rings); 533 pvscsi_raise_message_interrupt(s); 534 } 535 } 536 537 static void 538 pvscsi_hotplug(HotplugHandler *hotplug_dev, 
DeviceState *dev, Error **errp) 539 { 540 PVSCSIState *s = PVSCSI(hotplug_dev); 541 542 pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED); 543 } 544 545 static void 546 pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) 547 { 548 PVSCSIState *s = PVSCSI(hotplug_dev); 549 550 pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED); 551 qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); 552 } 553 554 static void 555 pvscsi_request_cancelled(SCSIRequest *req) 556 { 557 PVSCSIRequest *pvscsi_req = req->hba_private; 558 PVSCSIState *s = pvscsi_req->dev; 559 560 if (pvscsi_req->completed) { 561 return; 562 } 563 564 if (pvscsi_req->dev->resetting) { 565 pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET; 566 } else { 567 pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE; 568 } 569 570 pvscsi_complete_request(s, pvscsi_req); 571 } 572 573 static SCSIDevice* 574 pvscsi_device_find(PVSCSIState *s, int channel, int target, 575 uint8_t *requested_lun, uint8_t *target_lun) 576 { 577 if (requested_lun[0] || requested_lun[2] || requested_lun[3] || 578 requested_lun[4] || requested_lun[5] || requested_lun[6] || 579 requested_lun[7] || (target > PVSCSI_MAX_DEVS)) { 580 return NULL; 581 } else { 582 *target_lun = requested_lun[1]; 583 return scsi_device_find(&s->bus, channel, target, *target_lun); 584 } 585 } 586 587 static PVSCSIRequest * 588 pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d, 589 struct PVSCSIRingReqDesc *descr) 590 { 591 PVSCSIRequest *pvscsi_req; 592 uint8_t lun; 593 594 pvscsi_req = g_malloc0(sizeof(*pvscsi_req)); 595 pvscsi_req->dev = s; 596 pvscsi_req->req = *descr; 597 pvscsi_req->cmp.context = pvscsi_req->req.context; 598 QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next); 599 600 *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun); 601 if (*d) { 602 pvscsi_req->lun = lun; 603 } 604 605 return pvscsi_req; 606 } 607 608 static void 609 pvscsi_convert_sglist(PVSCSIRequest *r) 610 { 611 
int chunk_size; 612 uint64_t data_length = r->req.dataLen; 613 PVSCSISGState sg = r->sg; 614 while (data_length) { 615 while (!sg.resid) { 616 pvscsi_get_next_sg_elem(&sg); 617 trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr, 618 r->sg.resid); 619 } 620 assert(data_length > 0); 621 chunk_size = MIN((unsigned) data_length, sg.resid); 622 if (chunk_size) { 623 qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size); 624 } 625 626 sg.dataAddr += chunk_size; 627 data_length -= chunk_size; 628 sg.resid -= chunk_size; 629 } 630 } 631 632 static void 633 pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r) 634 { 635 PCIDevice *d = PCI_DEVICE(s); 636 637 pci_dma_sglist_init(&r->sgl, d, 1); 638 if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) { 639 pvscsi_convert_sglist(r); 640 } else { 641 qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen); 642 } 643 } 644 645 static void 646 pvscsi_process_request_descriptor(PVSCSIState *s, 647 struct PVSCSIRingReqDesc *descr) 648 { 649 SCSIDevice *d; 650 PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr); 651 int64_t n; 652 653 trace_pvscsi_process_req_descr(descr->cdb[0], descr->context); 654 655 if (!d) { 656 r->cmp.hostStatus = BTSTAT_SELTIMEO; 657 trace_pvscsi_process_req_descr_unknown_device(); 658 pvscsi_complete_request(s, r); 659 return; 660 } 661 662 if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) { 663 r->sg.elemAddr = descr->dataAddr; 664 } 665 666 r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r); 667 if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV && 668 (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) { 669 r->cmp.hostStatus = BTSTAT_BADMSG; 670 trace_pvscsi_process_req_descr_invalid_dir(); 671 scsi_req_cancel(r->sreq); 672 return; 673 } 674 if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV && 675 (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) { 676 r->cmp.hostStatus = BTSTAT_BADMSG; 677 trace_pvscsi_process_req_descr_invalid_dir(); 678 scsi_req_cancel(r->sreq); 679 return; 680 } 681 682 
pvscsi_build_sglist(s, r); 683 n = scsi_req_enqueue(r->sreq); 684 685 if (n) { 686 scsi_req_continue(r->sreq); 687 } 688 } 689 690 static void 691 pvscsi_process_io(PVSCSIState *s) 692 { 693 PVSCSIRingReqDesc descr; 694 hwaddr next_descr_pa; 695 696 assert(s->rings_info_valid); 697 while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) { 698 699 /* Only read after production index verification */ 700 smp_rmb(); 701 702 trace_pvscsi_process_io(next_descr_pa); 703 cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr)); 704 pvscsi_process_request_descriptor(s, &descr); 705 } 706 707 pvscsi_ring_flush_req(&s->rings); 708 } 709 710 static void 711 pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc) 712 { 713 int i; 714 trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN); 715 716 trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages); 717 for (i = 0; i < rc->reqRingNumPages; i++) { 718 trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]); 719 } 720 721 trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages); 722 for (i = 0; i < rc->cmpRingNumPages; i++) { 723 trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->reqRingPPNs[i]); 724 } 725 } 726 727 static uint64_t 728 pvscsi_on_cmd_config(PVSCSIState *s) 729 { 730 trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG"); 731 return PVSCSI_COMMAND_PROCESSING_FAILED; 732 } 733 734 static uint64_t 735 pvscsi_on_cmd_unplug(PVSCSIState *s) 736 { 737 trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG"); 738 return PVSCSI_COMMAND_PROCESSING_FAILED; 739 } 740 741 static uint64_t 742 pvscsi_on_issue_scsi(PVSCSIState *s) 743 { 744 trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI"); 745 return PVSCSI_COMMAND_PROCESSING_FAILED; 746 } 747 748 static uint64_t 749 pvscsi_on_cmd_setup_rings(PVSCSIState *s) 750 { 751 PVSCSICmdDescSetupRings *rc = 752 (PVSCSICmdDescSetupRings *) s->curr_cmd_data; 753 754 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS"); 755 756 
pvscsi_dbg_dump_tx_rings_config(rc); 757 pvscsi_ring_init_data(&s->rings, rc); 758 s->rings_info_valid = TRUE; 759 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 760 } 761 762 static uint64_t 763 pvscsi_on_cmd_abort(PVSCSIState *s) 764 { 765 PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data; 766 PVSCSIRequest *r, *next; 767 768 trace_pvscsi_on_cmd_abort(cmd->context, cmd->target); 769 770 QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) { 771 if (r->req.context == cmd->context) { 772 break; 773 } 774 } 775 if (r) { 776 assert(!r->completed); 777 r->cmp.hostStatus = BTSTAT_ABORTQUEUE; 778 scsi_req_cancel(r->sreq); 779 } 780 781 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 782 } 783 784 static uint64_t 785 pvscsi_on_cmd_unknown(PVSCSIState *s) 786 { 787 trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]); 788 return PVSCSI_COMMAND_PROCESSING_FAILED; 789 } 790 791 static uint64_t 792 pvscsi_on_cmd_reset_device(PVSCSIState *s) 793 { 794 uint8_t target_lun = 0; 795 struct PVSCSICmdDescResetDevice *cmd = 796 (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data; 797 SCSIDevice *sdev; 798 799 sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun); 800 801 trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev); 802 803 if (sdev != NULL) { 804 s->resetting++; 805 device_reset(&sdev->qdev); 806 s->resetting--; 807 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 808 } 809 810 return PVSCSI_COMMAND_PROCESSING_FAILED; 811 } 812 813 static uint64_t 814 pvscsi_on_cmd_reset_bus(PVSCSIState *s) 815 { 816 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS"); 817 818 s->resetting++; 819 qbus_reset_all_fn(&s->bus); 820 s->resetting--; 821 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 822 } 823 824 static uint64_t 825 pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s) 826 { 827 PVSCSICmdDescSetupMsgRing *rc = 828 (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data; 829 830 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING"); 831 832 if (!s->use_msg) 
{ 833 return PVSCSI_COMMAND_PROCESSING_FAILED; 834 } 835 836 if (s->rings_info_valid) { 837 pvscsi_ring_init_msg(&s->rings, rc); 838 s->msg_ring_info_valid = TRUE; 839 } 840 return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t); 841 } 842 843 static uint64_t 844 pvscsi_on_cmd_adapter_reset(PVSCSIState *s) 845 { 846 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET"); 847 848 pvscsi_reset_adapter(s); 849 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED; 850 } 851 852 static const struct { 853 int data_size; 854 uint64_t (*handler_fn)(PVSCSIState *s); 855 } pvscsi_commands[] = { 856 [PVSCSI_CMD_FIRST] = { 857 .data_size = 0, 858 .handler_fn = pvscsi_on_cmd_unknown, 859 }, 860 861 /* Not implemented, data size defined based on what arrives on windows */ 862 [PVSCSI_CMD_CONFIG] = { 863 .data_size = 6 * sizeof(uint32_t), 864 .handler_fn = pvscsi_on_cmd_config, 865 }, 866 867 /* Command not implemented, data size is unknown */ 868 [PVSCSI_CMD_ISSUE_SCSI] = { 869 .data_size = 0, 870 .handler_fn = pvscsi_on_issue_scsi, 871 }, 872 873 /* Command not implemented, data size is unknown */ 874 [PVSCSI_CMD_DEVICE_UNPLUG] = { 875 .data_size = 0, 876 .handler_fn = pvscsi_on_cmd_unplug, 877 }, 878 879 [PVSCSI_CMD_SETUP_RINGS] = { 880 .data_size = sizeof(PVSCSICmdDescSetupRings), 881 .handler_fn = pvscsi_on_cmd_setup_rings, 882 }, 883 884 [PVSCSI_CMD_RESET_DEVICE] = { 885 .data_size = sizeof(struct PVSCSICmdDescResetDevice), 886 .handler_fn = pvscsi_on_cmd_reset_device, 887 }, 888 889 [PVSCSI_CMD_RESET_BUS] = { 890 .data_size = 0, 891 .handler_fn = pvscsi_on_cmd_reset_bus, 892 }, 893 894 [PVSCSI_CMD_SETUP_MSG_RING] = { 895 .data_size = sizeof(PVSCSICmdDescSetupMsgRing), 896 .handler_fn = pvscsi_on_cmd_setup_msg_ring, 897 }, 898 899 [PVSCSI_CMD_ADAPTER_RESET] = { 900 .data_size = 0, 901 .handler_fn = pvscsi_on_cmd_adapter_reset, 902 }, 903 904 [PVSCSI_CMD_ABORT_CMD] = { 905 .data_size = sizeof(struct PVSCSICmdDescAbortCmd), 906 .handler_fn = pvscsi_on_cmd_abort, 907 }, 908 }; 909 
910 static void 911 pvscsi_do_command_processing(PVSCSIState *s) 912 { 913 size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t); 914 915 assert(s->curr_cmd < PVSCSI_CMD_LAST); 916 if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) { 917 s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s); 918 s->curr_cmd = PVSCSI_CMD_FIRST; 919 s->curr_cmd_data_cntr = 0; 920 } 921 } 922 923 static void 924 pvscsi_on_command_data(PVSCSIState *s, uint32_t value) 925 { 926 size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t); 927 928 assert(bytes_arrived < sizeof(s->curr_cmd_data)); 929 s->curr_cmd_data[s->curr_cmd_data_cntr++] = value; 930 931 pvscsi_do_command_processing(s); 932 } 933 934 static void 935 pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id) 936 { 937 if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) { 938 s->curr_cmd = cmd_id; 939 } else { 940 s->curr_cmd = PVSCSI_CMD_FIRST; 941 trace_pvscsi_on_cmd_unknown(cmd_id); 942 } 943 944 s->curr_cmd_data_cntr = 0; 945 s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA; 946 947 pvscsi_do_command_processing(s); 948 } 949 950 static void 951 pvscsi_io_write(void *opaque, hwaddr addr, 952 uint64_t val, unsigned size) 953 { 954 PVSCSIState *s = opaque; 955 956 switch (addr) { 957 case PVSCSI_REG_OFFSET_COMMAND: 958 pvscsi_on_command(s, val); 959 break; 960 961 case PVSCSI_REG_OFFSET_COMMAND_DATA: 962 pvscsi_on_command_data(s, (uint32_t) val); 963 break; 964 965 case PVSCSI_REG_OFFSET_INTR_STATUS: 966 trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val); 967 s->reg_interrupt_status &= ~val; 968 pvscsi_update_irq_status(s); 969 pvscsi_schedule_completion_processing(s); 970 break; 971 972 case PVSCSI_REG_OFFSET_INTR_MASK: 973 trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val); 974 s->reg_interrupt_enabled = val; 975 pvscsi_update_irq_status(s); 976 break; 977 978 case PVSCSI_REG_OFFSET_KICK_NON_RW_IO: 979 
trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val); 980 pvscsi_process_io(s); 981 break; 982 983 case PVSCSI_REG_OFFSET_KICK_RW_IO: 984 trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val); 985 pvscsi_process_io(s); 986 break; 987 988 case PVSCSI_REG_OFFSET_DEBUG: 989 trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val); 990 break; 991 992 default: 993 trace_pvscsi_io_write_unknown(addr, size, val); 994 break; 995 } 996 997 } 998 999 static uint64_t 1000 pvscsi_io_read(void *opaque, hwaddr addr, unsigned size) 1001 { 1002 PVSCSIState *s = opaque; 1003 1004 switch (addr) { 1005 case PVSCSI_REG_OFFSET_INTR_STATUS: 1006 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS", 1007 s->reg_interrupt_status); 1008 return s->reg_interrupt_status; 1009 1010 case PVSCSI_REG_OFFSET_INTR_MASK: 1011 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK", 1012 s->reg_interrupt_status); 1013 return s->reg_interrupt_enabled; 1014 1015 case PVSCSI_REG_OFFSET_COMMAND_STATUS: 1016 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS", 1017 s->reg_interrupt_status); 1018 return s->reg_command_status; 1019 1020 default: 1021 trace_pvscsi_io_read_unknown(addr, size); 1022 return 0; 1023 } 1024 } 1025 1026 1027 static bool 1028 pvscsi_init_msi(PVSCSIState *s) 1029 { 1030 int res; 1031 PCIDevice *d = PCI_DEVICE(s); 1032 1033 res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS, 1034 PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK); 1035 if (res < 0) { 1036 trace_pvscsi_init_msi_fail(res); 1037 s->msi_used = false; 1038 } else { 1039 s->msi_used = true; 1040 } 1041 1042 return s->msi_used; 1043 } 1044 1045 static void 1046 pvscsi_cleanup_msi(PVSCSIState *s) 1047 { 1048 PCIDevice *d = PCI_DEVICE(s); 1049 1050 if (s->msi_used) { 1051 msi_uninit(d); 1052 } 1053 } 1054 1055 static const MemoryRegionOps pvscsi_ops = { 1056 .read = pvscsi_io_read, 1057 .write = pvscsi_io_write, 1058 .endianness = DEVICE_LITTLE_ENDIAN, 1059 .impl = { 1060 .min_access_size = 4, 1061 
.max_access_size = 4, 1062 }, 1063 }; 1064 1065 static const struct SCSIBusInfo pvscsi_scsi_info = { 1066 .tcq = true, 1067 .max_target = PVSCSI_MAX_DEVS, 1068 .max_channel = 0, 1069 .max_lun = 0, 1070 1071 .get_sg_list = pvscsi_get_sg_list, 1072 .complete = pvscsi_command_complete, 1073 .cancel = pvscsi_request_cancelled, 1074 }; 1075 1076 static int 1077 pvscsi_init(PCIDevice *pci_dev) 1078 { 1079 PVSCSIState *s = PVSCSI(pci_dev); 1080 1081 trace_pvscsi_state("init"); 1082 1083 /* PCI subsystem ID, subsystem vendor ID, revision */ 1084 if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) { 1085 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000); 1086 } else { 1087 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, 1088 PCI_VENDOR_ID_VMWARE); 1089 pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 1090 PCI_DEVICE_ID_VMWARE_PVSCSI); 1091 pci_config_set_revision(pci_dev->config, 0x2); 1092 } 1093 1094 /* PCI latency timer = 255 */ 1095 pci_dev->config[PCI_LATENCY_TIMER] = 0xff; 1096 1097 /* Interrupt pin A */ 1098 pci_config_set_interrupt_pin(pci_dev->config, 1); 1099 1100 memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s, 1101 "pvscsi-io", PVSCSI_MEM_SPACE_SIZE); 1102 pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space); 1103 1104 pvscsi_init_msi(s); 1105 1106 s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); 1107 if (!s->completion_worker) { 1108 pvscsi_cleanup_msi(s); 1109 return -ENOMEM; 1110 } 1111 1112 scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev), 1113 &pvscsi_scsi_info, NULL); 1114 /* override default SCSI bus hotplug-handler, with pvscsi's one */ 1115 qbus_set_hotplug_handler(BUS(&s->bus), DEVICE(s), &error_abort); 1116 pvscsi_reset_state(s); 1117 1118 return 0; 1119 } 1120 1121 static void 1122 pvscsi_uninit(PCIDevice *pci_dev) 1123 { 1124 PVSCSIState *s = PVSCSI(pci_dev); 1125 1126 trace_pvscsi_state("uninit"); 1127 qemu_bh_delete(s->completion_worker); 1128 1129 pvscsi_cleanup_msi(s); 1130 } 
/* DeviceClass reset hook: full adapter reset */
static void
pvscsi_reset(DeviceState *dev)
{
    PCIDevice *d = PCI_DEVICE(dev);
    PVSCSIState *s = PVSCSI(d);

    trace_pvscsi_state("reset");
    pvscsi_reset_adapter(s);
}

/*
 * Migration pre-save hook: in-flight requests hold host pointers and are
 * not migratable, so both queues must already be empty at save time.
 */
static void
pvscsi_pre_save(void *opaque)
{
    PVSCSIState *s = (PVSCSIState *) opaque;

    trace_pvscsi_state("presave");

    assert(QTAILQ_EMPTY(&s->pending_queue));
    assert(QTAILQ_EMPTY(&s->completion_queue));
}

static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}

/*
 * Migration state description.  Field order defines the wire format.
 * NOTE(review): the message-ring fields (rings.msg_len_mask,
 * rings.msg_ring_pages_pa, rings.filled_msg_ptr) are not listed even
 * though msg_ring_info_valid is migrated — looks like msg-ring state is
 * lost across migration; confirm whether this is intentional.
 */
static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    }
};

static Property pvscsi_properties[] = {
    /* use_msg=1 enables the device-status message ring (hotplug events) */
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    /* compat knob: pre-2.x subsystem-ID/MSI-offset PCI config layout */
    DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class initializer: wire PCI, reset, migration and hotplug callbacks */
static void pvscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->init = pvscsi_init;
    k->exit = pvscsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    k->subsystem_id = 0x1000;
    dc->reset = pvscsi_reset;
    dc->vmsd = &vmstate_pvscsi;
    dc->props = pvscsi_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    hc->unplug = pvscsi_hot_unplug;
    hc->plug = pvscsi_hotplug;
}

static const TypeInfo pvscsi_info = {
    .name          = TYPE_PVSCSI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PVSCSIState),
    .class_init    = pvscsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);