1 /*
2 * QEMU VMWARE PVSCSI paravirtual SCSI bus
3 *
4 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
5 *
6 * Developed by Daynix Computing LTD (http://www.daynix.com)
7 *
8 * Based on implementation by Paolo Bonzini
9 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
10 *
11 * Authors:
12 * Paolo Bonzini <pbonzini@redhat.com>
13 * Dmitry Fleytman <dmitry@daynix.com>
14 * Yan Vugenfirer <yan@daynix.com>
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2.
17 * See the COPYING file in the top-level directory.
18 *
19 * NOTE about MSI-X:
20 * MSI-X support has been removed for the moment because it leads Windows OS
21 * to crash on startup. The crash happens because Windows driver requires
22 * MSI-X shared memory to be part of the same BAR used for rings state
23 * registers, etc. This is not supported by QEMU infrastructure so separate
24 * BAR created from MSI-X purposes. Windows driver fails to deal with 2 BARs.
25 *
26 */
27
28 #include "qemu/osdep.h"
29 #include "qapi/error.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/module.h"
32 #include "hw/scsi/scsi.h"
33 #include "migration/vmstate.h"
34 #include "scsi/constants.h"
35 #include "hw/pci/msi.h"
36 #include "hw/qdev-properties.h"
37 #include "vmw_pvscsi.h"
38 #include "trace.h"
39 #include "qom/object.h"
40
41
42 #define PVSCSI_USE_64BIT (true)
43 #define PVSCSI_PER_VECTOR_MASK (false)
44
45 #define PVSCSI_MAX_DEVS (64)
46 #define PVSCSI_MSIX_NUM_VECTORS (1)
47
48 #define PVSCSI_MAX_SG_ELEM 2048
49
50 #define PVSCSI_MAX_CMD_DATA_WORDS \
51 (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
52
/*
 * Read/write one 32-bit little-endian field of the guest-visible
 * PVSCSIRingsState page (at (m)->rs_pa) via PCI DMA.  'm' must be the
 * 'rings' member of a PVSCSIState; container_of recovers the owning
 * PCI device used for the DMA access.
 */
#define RS_GET_FIELD(pval, m, field) \
    ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), \
                 pval, MEMTXATTRS_UNSPECIFIED)
#define RS_SET_FIELD(m, field, val) \
    (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \
                 MEMTXATTRS_UNSPECIFIED))
61
/* Class data: saves the parent's realize hook so ours can chain to it. */
struct PVSCSIClass {
    PCIDeviceClass parent_class;
    DeviceRealize parent_dc_realize;
};
66
67 #define TYPE_PVSCSI "pvscsi"
68 OBJECT_DECLARE_TYPE(PVSCSIState, PVSCSIClass, PVSCSI)
69
70
71 #define PVSCSI_MSI_OFFSET (0x7c)
72 #define PVSCSI_EXP_EP_OFFSET (0x40)
73
/* Device-side bookkeeping for the request/completion/message rings. */
typedef struct PVSCSIRingInfo {
    uint64_t rs_pa;        /* Guest PA of the shared PVSCSIRingsState page */
    uint32_t txr_len_mask; /* Request-ring index mask (ring sizes are 2^n) */
    uint32_t rxr_len_mask; /* Completion-ring index mask */
    uint32_t msg_len_mask; /* Message-ring index mask */
    uint64_t req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t consumed_ptr;   /* Device-side request consumer index */
    uint64_t filled_cmp_ptr; /* Device-side completion producer index */
    uint64_t filled_msg_ptr; /* Device-side message producer index */
} PVSCSIRingInfo;

/* Cursor over a guest scatter-gather element chain. */
typedef struct PVSCSISGState {
    hwaddr elemAddr; /* Guest PA of the next PVSCSISGElement to fetch */
    hwaddr dataAddr; /* Guest PA of the current element's data */
    uint32_t resid;  /* Bytes remaining in the current element */
} PVSCSISGState;

typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;
94
struct PVSCSIState {
    PCIDevice parent_obj;
    MemoryRegion io_space;              /* Register BAR, served by pvscsi_ops */
    SCSIBus bus;
    QEMUBH *completion_worker;          /* Runs pvscsi_process_completion_queue */
    PVSCSIRequestList pending_queue;    /* Issued requests, not yet finished */
    PVSCSIRequestList completion_queue; /* Finished, awaiting ring write-back */

    uint64_t reg_interrupt_status;        /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;       /* Interrupt mask register value */
    uint64_t reg_command_status;          /* Command status register value */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                   /* Last command arrived */
    uint32_t curr_cmd_data_cntr;         /* Amount of data for last command */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;            /* Whether data rings initialized */
    uint8_t msg_ring_info_valid;         /* Whether message ring initialized */
    uint8_t use_msg;                     /* Whether to use message ring */

    uint8_t msi_used;                    /* For migration compatibility */
    PVSCSIRingInfo rings;                /* Data transfer rings manager */
    uint32_t resetting;                  /* Reset in progress */
};
122
/* Per-request state: guest descriptor copy plus the completion being built. */
typedef struct PVSCSIRequest {
    SCSIRequest *sreq;            /* Core SCSI request; NULL once released */
    PVSCSIState *dev;
    uint8_t sense_key;
    uint8_t completed;            /* Set when moved to completion_queue */
    int lun;                      /* Flat LUN decoded from req.lun[1] */
    QEMUSGList sgl;
    PVSCSISGState sg;
    struct PVSCSIRingReqDesc req; /* Copy of the guest request descriptor */
    struct PVSCSIRingCmpDesc cmp; /* Completion descriptor under construction */
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;
135
136 /* Integer binary logarithm */
static int
pvscsi_log2(uint32_t input)
{
    /*
     * Bit length of @input: the smallest n with (input >> n) == 0,
     * i.e. floor(log2(input)) + 1.  @input must be non-zero.
     */
    int msb = 0;

    assert(input > 0);
    do {
        msb++;
    } while (input >> msb);
    return msb;
}
146
/*
 * Initialise request/completion ring bookkeeping from a guest
 * PVSCSI_CMD_SETUP_RINGS descriptor: record the rings-state page and the
 * per-ring page addresses and zero the shared producer/consumer indices.
 * The caller (pvscsi_on_cmd_setup_rings) has validated the page counts.
 */
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    /* Ring sizes are powers of two, so size-1 has exactly log2(size) bits */
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish zeroed indices and ring sizes in the shared rings-state page */
    RS_SET_FIELD(m, reqProdIdx, 0);
    RS_SET_FIELD(m, reqConsIdx, 0);
    RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m, cmpProdIdx, 0);
    RS_SET_FIELD(m, cmpConsIdx, 0);
    RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
187
/*
 * Initialise message-ring bookkeeping from a guest
 * PVSCSI_CMD_SETUP_MSG_RING descriptor.  Unlike the data rings, the page
 * count is validated here.  Returns 0 on success, -1 on a bad page count.
 */
static int
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    if (!ri->numPages || ri->numPages > PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES) {
        return -1;
    }
    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish zeroed indices and the ring size to the guest */
    RS_SET_FIELD(m, msgProdIdx, 0);
    RS_SET_FIELD(m, msgConsIdx, 0);
    RS_SET_FIELD(m, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();

    return 0;
}
220
221 static void
pvscsi_ring_cleanup(PVSCSIRingInfo * mgr)222 pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
223 {
224 mgr->rs_pa = 0;
225 mgr->txr_len_mask = 0;
226 mgr->rxr_len_mask = 0;
227 mgr->msg_len_mask = 0;
228 mgr->consumed_ptr = 0;
229 mgr->filled_cmp_ptr = 0;
230 mgr->filled_msg_ptr = 0;
231 memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
232 memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
233 memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
234 }
235
/*
 * Return the guest PA of the next unconsumed request descriptor and
 * advance the consumer index, or 0 when the ring is empty.  The extra
 * ring_size bound rejects a guest-published reqProdIdx that ran away
 * from our consumer index, so a malicious producer index cannot make
 * us consume bogus entries indefinitely.
 */
static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr;
    uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
                         * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

    RS_GET_FIELD(&ready_ptr, mgr, reqProdIdx);
    if (ready_ptr != mgr->consumed_ptr
        && ready_ptr - mgr->consumed_ptr < ring_size) {
        /* Indices are free-running; the mask wraps them onto the ring */
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
            next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        uint32_t inpage_idx =
            next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

        return mgr->req_ring_pages_pa[next_ready_page] +
               inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}
259
/* Publish our request consumer index to the shared rings-state page. */
static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr);
}
265
266 static hwaddr
pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo * mgr)267 pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
268 {
269 /*
270 * According to Linux driver code it explicitly verifies that number
271 * of requests being processed by device is less then the size of
272 * completion queue, so device may omit completion queue overflow
273 * conditions check. We assume that this is true for other (Windows)
274 * drivers as well.
275 */
276
277 uint32_t free_cmp_ptr =
278 mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
279 uint32_t free_cmp_page =
280 free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
281 uint32_t inpage_idx =
282 free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
283 return mgr->cmp_ring_pages_pa[free_cmp_page] +
284 inpage_idx * sizeof(PVSCSIRingCmpDesc);
285 }
286
287 static hwaddr
pvscsi_ring_pop_msg_descr(PVSCSIRingInfo * mgr)288 pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
289 {
290 uint32_t free_msg_ptr =
291 mgr->filled_msg_ptr++ & mgr->msg_len_mask;
292 uint32_t free_msg_page =
293 free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
294 uint32_t inpage_idx =
295 free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
296 return mgr->msg_ring_pages_pa[free_msg_page] +
297 inpage_idx * sizeof(PVSCSIRingMsgDesc);
298 }
299
/* Publish the completion producer index after descriptors are written. */
static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
}
310
/*
 * True when the message ring has a free slot, judged from the
 * guest-visible producer/consumer indices (free-running counters;
 * msg_len_mask + 1 is the ring size).
 */
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx;
    uint32_t consIdx;

    RS_GET_FIELD(&prodIdx, mgr, msgProdIdx);
    RS_GET_FIELD(&consIdx, mgr, msgConsIdx);

    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}
322
/* Publish the message producer index after descriptors are written. */
static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
}
333
/*
 * Return the adapter's software state to power-on defaults: no command
 * in progress, rings torn down, request queues emptied.  Interrupt
 * enable mask is deliberately left untouched.
 */
static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}
347
/*
 * Propagate the (status & mask) interrupt condition to the guest:
 * edge-style MSI message when MSI is enabled, level INTx otherwise.
 * Note that with MSI the line is never "lowered" - only a new message
 * is sent when a condition is pending.
 */
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    pci_set_irq(d, !!should_raise);
}
367
/* Latch the completion-ring interrupt condition and notify the guest. */
static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
378
/* Latch the message-ring interrupt condition and notify the guest. */
static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
389
/* Copy one completion descriptor into the next free completion-ring slot. */
static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, cmp_desc, sizeof(*cmp_desc));
}
399
/* Copy one message descriptor into the next free message-ring slot. */
static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, msg_desc, sizeof(*msg_desc));
}
409
410 static void
pvscsi_process_completion_queue(void * opaque)411 pvscsi_process_completion_queue(void *opaque)
412 {
413 PVSCSIState *s = opaque;
414 PVSCSIRequest *pvscsi_req;
415 bool has_completed = false;
416
417 while (!QTAILQ_EMPTY(&s->completion_queue)) {
418 pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
419 QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
420 pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
421 g_free(pvscsi_req);
422 has_completed = true;
423 }
424
425 if (has_completed) {
426 pvscsi_ring_flush_cmp(&s->rings);
427 pvscsi_raise_completion_interrupt(s);
428 }
429 }
430
/*
 * Full adapter reset: cancel all in-flight requests via a bus reset
 * (s->resetting makes cancellations report BTSTAT_BUSRESET), flush the
 * resulting completions, then drop all software state.
 */
static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    bus_cold_reset(BUS(&s->bus));
    s->resetting--;
    pvscsi_process_completion_queue(s);
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}
441
/* Defer completion-ring write-back to the bottom half, if there is work. */
static void
pvscsi_schedule_completion_processing(PVSCSIState *s)
{
    /* Try putting more complete requests on the ring. */
    if (!QTAILQ_EMPTY(&s->completion_queue)) {
        qemu_bh_schedule(s->completion_worker);
    }
}
450
/*
 * Move a request from the pending queue to the completion queue and
 * schedule the worker.  Releases our reference to the core SCSIRequest;
 * r->cmp must already describe the outcome.
 */
static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}
467
pvscsi_get_sg_list(SCSIRequest * r)468 static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
469 {
470 PVSCSIRequest *req = r->hba_private;
471
472 trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);
473
474 return &req->sgl;
475 }
476
/*
 * Fetch the next PVSCSISGElement from guest memory at sg->elemAddr and
 * advance the cursor to point at its data and at the following element.
 */
static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, &elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
         * There is PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in
         * header file but its value is unknown. This flag requires
         * additional processing, so we put warning here to catch it
         * some day and make proper implementation
         */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}
497
/*
 * Record the sense key and copy sense data to the guest buffer, capped
 * at the length the guest provided.  The bit-1 test on sense[0]
 * distinguishes descriptor-format sense (key in byte 1) from fixed
 * format (key in byte 2).
 */
static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}
505
/*
 * SCSI-layer callback: the request failed at the host/transport level.
 * Map QEMU's host_status onto a PVSCSI BTSTAT code, free the SG list
 * and queue the completion.
 */
static void
pvscsi_command_failed(SCSIRequest *req)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    switch (req->host_status) {
    case SCSI_HOST_NO_LUN:
        pvscsi_req->cmp.hostStatus = BTSTAT_LUNMISMATCH;
        break;
    case SCSI_HOST_BUSY:
        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        pvscsi_req->cmp.hostStatus = BTSTAT_SENTRST;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        pvscsi_req->cmp.hostStatus = BTSTAT_SELTIMEO;
        break;
    case SCSI_HOST_RESET:
        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
        break;
    default:
        pvscsi_req->cmp.hostStatus = BTSTAT_HASOFTWARE;
        break;
    }
    /* Host-level failure: the error lives in hostStatus, SCSI status is GOOD */
    pvscsi_req->cmp.scsiStatus = GOOD;
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}
543
/*
 * SCSI-layer callback: the request finished at the device level.
 * Record short-transfer status, propagate the SCSI status byte and,
 * on CHECK CONDITION, copy sense data to the guest.  Queues the
 * completion descriptor.
 */
static void
pvscsi_command_complete(SCSIRequest *req, size_t resid)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    if (resid) {
        /* Short transfer. */
        trace_pvscsi_command_complete_data_run();
        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
    }

    pvscsi_req->cmp.scsiStatus = req->status;
    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
        uint8_t sense[SCSI_SENSE_BUF_SIZE];
        int sense_len =
            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));

        trace_pvscsi_command_complete_sense_len(sense_len);
        pvscsi_write_sense(pvscsi_req, sense, sense_len);
    }
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}
574
/*
 * Post a device-status-changed message (hotplug/unplug) on the message
 * ring and raise the message interrupt.  Silently dropped when the
 * message ring is not set up or is full.
 */
static void
pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
{
    if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
        PVSCSIMsgDescDevStatusChanged msg = {0};

        msg.type = msg_type;
        msg.bus = dev->channel;
        msg.target = dev->id;
        /* lun[1] mirrors the flat single-level LUN encoding used on requests */
        msg.lun[1] = dev->lun;

        pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
        pvscsi_ring_flush_msg(&s->rings);
        pvscsi_raise_message_interrupt(s);
    }
}
591
/* Hotplug handler: announce a newly attached SCSI device to the guest. */
static void
pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED);
}
599
/* Hot-unplug handler: notify the guest, then actually detach the device. */
static void
pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}
608
609 static void
pvscsi_request_cancelled(SCSIRequest * req)610 pvscsi_request_cancelled(SCSIRequest *req)
611 {
612 PVSCSIRequest *pvscsi_req = req->hba_private;
613 PVSCSIState *s = pvscsi_req->dev;
614
615 if (pvscsi_req->completed) {
616 return;
617 }
618
619 if (pvscsi_req->dev->resetting) {
620 pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
621 } else {
622 pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
623 }
624
625 pvscsi_complete_request(s, pvscsi_req);
626 }
627
628 static SCSIDevice*
pvscsi_device_find(PVSCSIState * s,int channel,int target,uint8_t * requested_lun,uint8_t * target_lun)629 pvscsi_device_find(PVSCSIState *s, int channel, int target,
630 uint8_t *requested_lun, uint8_t *target_lun)
631 {
632 if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
633 requested_lun[4] || requested_lun[5] || requested_lun[6] ||
634 requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
635 return NULL;
636 } else {
637 *target_lun = requested_lun[1];
638 return scsi_device_find(&s->bus, channel, target, *target_lun);
639 }
640 }
641
642 static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState * s,SCSIDevice ** d,struct PVSCSIRingReqDesc * descr)643 pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
644 struct PVSCSIRingReqDesc *descr)
645 {
646 PVSCSIRequest *pvscsi_req;
647 uint8_t lun;
648
649 pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
650 pvscsi_req->dev = s;
651 pvscsi_req->req = *descr;
652 pvscsi_req->cmp.context = pvscsi_req->req.context;
653 QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);
654
655 *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
656 if (*d) {
657 pvscsi_req->lun = lun;
658 }
659
660 return pvscsi_req;
661 }
662
663 static void
pvscsi_convert_sglist(PVSCSIRequest * r)664 pvscsi_convert_sglist(PVSCSIRequest *r)
665 {
666 uint32_t chunk_size, elmcnt = 0;
667 uint64_t data_length = r->req.dataLen;
668 PVSCSISGState sg = r->sg;
669 while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) {
670 while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) {
671 pvscsi_get_next_sg_elem(&sg);
672 trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
673 r->sg.resid);
674 }
675 chunk_size = MIN(data_length, sg.resid);
676 if (chunk_size) {
677 qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
678 }
679
680 sg.dataAddr += chunk_size;
681 data_length -= chunk_size;
682 sg.resid -= chunk_size;
683 }
684 }
685
/*
 * Build the DMA scatter-gather list for a request: either walk the
 * guest SG chain, or describe the single flat buffer the descriptor
 * points at.
 */
static void
pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
{
    PCIDevice *d = PCI_DEVICE(s);

    pci_dma_sglist_init(&r->sgl, d, 1);
    if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        pvscsi_convert_sglist(r);
    } else {
        qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
    }
}
698
/*
 * Execute one guest request descriptor: resolve the device, validate
 * the transfer direction against the parsed CDB, build the SG list and
 * hand the request to the SCSI core.  Failure paths complete the
 * request immediately (SELTIMEO for unknown device, BADMSG for a
 * direction mismatch).
 */
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, descr->cdbLen, r);
    /* CDB wants to read from the device but descriptor says write, or
     * vice versa: reject via cancellation, which completes with BADMSG */
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);

    if (n) {
        scsi_req_continue(r->sreq);
    }
}
743
/*
 * Kick handler: drain every published request descriptor from the
 * request ring and submit it, then publish our consumer index back to
 * the guest.
 */
static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    if (!s->rings_info_valid) {
        return;
    }

    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {

        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}
766
767 static void
pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings * rc)768 pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
769 {
770 int i;
771 trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);
772
773 trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
774 for (i = 0; i < rc->reqRingNumPages; i++) {
775 trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
776 }
777
778 trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
779 for (i = 0; i < rc->cmpRingNumPages; i++) {
780 trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
781 }
782 }
783
/* PVSCSI_CMD_CONFIG: not implemented; always reports failure. */
static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
790
/* PVSCSI_CMD_DEVICE_UNPLUG: not implemented; always reports failure. */
static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
797
/* PVSCSI_CMD_ISSUE_SCSI: not implemented; always reports failure. */
static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
804
/*
 * PVSCSI_CMD_SETUP_RINGS: validate the guest-supplied page counts and
 * initialise the request/completion rings from the collected command
 * data.
 */
static uint64_t
pvscsi_on_cmd_setup_rings(PVSCSIState *s)
{
    PVSCSICmdDescSetupRings *rc =
        (PVSCSICmdDescSetupRings *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");

    /* Reject zero or oversized page counts before touching the rings */
    if (!rc->reqRingNumPages
        || rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
        || !rc->cmpRingNumPages
        || rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    pvscsi_dbg_dump_tx_rings_config(rc);
    pvscsi_ring_init_data(&s->rings, rc);

    s->rings_info_valid = TRUE;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
826
/*
 * PVSCSI_CMD_ABORT_CMD: find the pending request matching the guest's
 * context cookie and cancel it (completes with BTSTAT_ABORTQUEUE via
 * the cancellation path).  Succeeds even if no such request exists.
 */
static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    /* r is NULL when the loop ran to completion without a match */
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
848
/* Fallback handler for unrecognised commands; always reports failure. */
static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
855
/*
 * PVSCSI_CMD_RESET_DEVICE: cold-reset a single target device.
 * s->resetting makes requests cancelled by the reset report
 * BTSTAT_BUSRESET.  Fails when the addressed device does not exist.
 */
static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_cold_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
877
/* PVSCSI_CMD_RESET_BUS: cold-reset the whole SCSI bus. */
static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    bus_cold_reset(BUS(&s->bus));
    s->resetting--;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
888
/*
 * PVSCSI_CMD_SETUP_MSG_RING: initialise the message ring.  Only honoured
 * when messages are enabled and the data rings were set up first.
 */
static uint64_t
pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
{
    PVSCSICmdDescSetupMsgRing *rc =
        (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");

    if (!s->use_msg) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    if (s->rings_info_valid) {
        if (pvscsi_ring_init_msg(&s->rings, rc) < 0) {
            return PVSCSI_COMMAND_PROCESSING_FAILED;
        }
        s->msg_ring_info_valid = TRUE;
    }
    /*
     * NOTE(review): on success this returns the command's data size in
     * 32-bit words rather than PVSCSI_COMMAND_PROCESSING_SUCCEEDED -
     * looks deliberate/legacy; confirm against driver expectations
     * before changing.
     */
    return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
}
909
/* PVSCSI_CMD_ADAPTER_RESET: full adapter reset (see pvscsi_reset_adapter). */
static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
918
/*
 * Command dispatch table, indexed by command id.  data_size is the
 * number of bytes of command data the guest must write to the
 * COMMAND_DATA register before handler_fn runs (0 = execute
 * immediately on command arrival).
 */
static const struct {
    int       data_size;
    uint64_t  (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented, data size defined based on what arrives on windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};
976
/*
 * Run the current command's handler once enough data words have been
 * collected (zero-data commands run immediately), then rearm for the
 * next command.
 */
static void
pvscsi_do_command_processing(PVSCSIState *s)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(s->curr_cmd < PVSCSI_CMD_LAST);
    if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
        s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
        s->curr_cmd = PVSCSI_CMD_FIRST;
        s->curr_cmd_data_cntr = 0;
    }
}
989
/*
 * COMMAND_DATA register write: append one 32-bit word to the command
 * data collector and try to execute.  The assert bounds the collector;
 * handlers reset the counter before it can overflow.
 */
static void
pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(bytes_arrived < sizeof(s->curr_cmd_data));
    s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;

    pvscsi_do_command_processing(s);
}
1000
/*
 * COMMAND register write: latch the new command id (unknown ids map to
 * PVSCSI_CMD_FIRST, whose handler reports failure), reset the data
 * collector and run zero-data commands immediately.
 */
static void
pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
{
    if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
        s->curr_cmd = cmd_id;
    } else {
        s->curr_cmd = PVSCSI_CMD_FIRST;
        trace_pvscsi_on_cmd_unknown(cmd_id);
    }

    s->curr_cmd_data_cntr   = 0;
    s->reg_command_status   = PVSCSI_COMMAND_NOT_ENOUGH_DATA;

    pvscsi_do_command_processing(s);
}
1016
/*
 * MMIO write dispatch for the register BAR.  Writes to unknown offsets
 * are traced and ignored.
 */
static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        /* Write-1-to-clear: written bits acknowledge interrupt conditions */
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }

}
1065
1066 static uint64_t
pvscsi_io_read(void * opaque,hwaddr addr,unsigned size)1067 pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
1068 {
1069 PVSCSIState *s = opaque;
1070
1071 switch (addr) {
1072 case PVSCSI_REG_OFFSET_INTR_STATUS:
1073 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
1074 s->reg_interrupt_status);
1075 return s->reg_interrupt_status;
1076
1077 case PVSCSI_REG_OFFSET_INTR_MASK:
1078 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
1079 s->reg_interrupt_status);
1080 return s->reg_interrupt_enabled;
1081
1082 case PVSCSI_REG_OFFSET_COMMAND_STATUS:
1083 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
1084 s->reg_interrupt_status);
1085 return s->reg_command_status;
1086
1087 default:
1088 trace_pvscsi_io_read_unknown(addr, size);
1089 return 0;
1090 }
1091 }
1092
1093
1094 static void
pvscsi_init_msi(PVSCSIState * s)1095 pvscsi_init_msi(PVSCSIState *s)
1096 {
1097 int res;
1098 PCIDevice *d = PCI_DEVICE(s);
1099
1100 res = msi_init(d, PVSCSI_MSI_OFFSET, PVSCSI_MSIX_NUM_VECTORS,
1101 PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL);
1102 if (res < 0) {
1103 trace_pvscsi_init_msi_fail(res);
1104 s->msi_used = false;
1105 } else {
1106 s->msi_used = true;
1107 }
1108 }
1109
1110 static void
pvscsi_cleanup_msi(PVSCSIState * s)1111 pvscsi_cleanup_msi(PVSCSIState *s)
1112 {
1113 PCIDevice *d = PCI_DEVICE(s);
1114
1115 msi_uninit(d);
1116 }
1117
/*
 * Register-BAR access callbacks.  .impl forces all guest accesses to be
 * presented to the handlers as aligned 32-bit operations, so the
 * read/write handlers never need to deal with partial registers.
 */
static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};
1127
/*
 * SCSI bus callbacks: PVSCSI exposes a single channel with up to
 * PVSCSI_MAX_DEVS targets, one LUN each, with tagged command queuing.
 */
static const struct SCSIBusInfo pvscsi_scsi_info = {
        .tcq = true,
        .max_target = PVSCSI_MAX_DEVS,
        .max_channel = 0,
        .max_lun = 0,

        .get_sg_list = pvscsi_get_sg_list,
        .complete = pvscsi_command_complete,
        .cancel = pvscsi_request_cancelled,
        .fail = pvscsi_command_failed,
};
1139
/*
 * PCI realize: program config-space identity, expose the register BAR,
 * set up MSI, the completion bottom-half and the SCSI bus.  The sequence
 * is order-sensitive (BAR after region init, hotplug handler after bus
 * creation), so it is kept as-is.
 */
static void
pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("init");

    /* PCI subsystem ID, subsystem vendor ID, revision */
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 PCI_VENDOR_ID_VMWARE);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 PCI_DEVICE_ID_VMWARE_PVSCSI);
    pci_config_set_revision(pci_dev->config, 0x2);

    /* PCI latency timer = 255 */
    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;

    /* Interrupt pin A */
    pci_config_set_interrupt_pin(pci_dev->config, 1);

    /* BAR 0: memory-mapped register space handled by pvscsi_ops. */
    memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s,
                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);

    /* Best-effort MSI setup; INTx remains as fallback on failure. */
    pvscsi_init_msi(s);

    /* Only add the PCIe endpoint capability when plugged into a PCIe bus. */
    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_get_bus(pci_dev))) {
        pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET);
    }

    /* Completions are delivered from a bottom half, guarded against
     * re-entrancy into this device's memory handlers. */
    s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s,
                                               &DEVICE(pci_dev)->mem_reentrancy_guard);

    scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info);
    /* override default SCSI bus hotplug-handler, with pvscsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s));
    pvscsi_reset_state(s);
}
1178
1179 static void
pvscsi_uninit(PCIDevice * pci_dev)1180 pvscsi_uninit(PCIDevice *pci_dev)
1181 {
1182 PVSCSIState *s = PVSCSI(pci_dev);
1183
1184 trace_pvscsi_state("uninit");
1185 qemu_bh_delete(s->completion_worker);
1186
1187 pvscsi_cleanup_msi(s);
1188 }
1189
1190 static void
pvscsi_reset(DeviceState * dev)1191 pvscsi_reset(DeviceState *dev)
1192 {
1193 PCIDevice *d = PCI_DEVICE(dev);
1194 PVSCSIState *s = PVSCSI(d);
1195
1196 trace_pvscsi_state("reset");
1197 pvscsi_reset_adapter(s);
1198 }
1199
1200 static int
pvscsi_pre_save(void * opaque)1201 pvscsi_pre_save(void *opaque)
1202 {
1203 PVSCSIState *s = (PVSCSIState *) opaque;
1204
1205 trace_pvscsi_state("presave");
1206
1207 assert(QTAILQ_EMPTY(&s->pending_queue));
1208 assert(QTAILQ_EMPTY(&s->completion_queue));
1209
1210 return 0;
1211 }
1212
/* Migration post-load hook: nothing to reconstruct, just trace. */
static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}
1219
/*
 * Subsection carrying the PCI(e) parent state; emitted via the
 * .subsections list of vmstate_pvscsi below.
 */
static const VMStateDescription vmstate_pvscsi_pcie_device = {
    .name = "pvscsi/pcie",
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_END_OF_LIST()
    }
};
1227
/*
 * Main migration descriptor.  Field order and types are part of the
 * migration wire format; do not reorder or retype without bumping
 * version_id and handling compatibility.
 */
static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        /* In-progress register-based command and its staged payload. */
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        /* Guest-physical layout of the request/completion rings. */
        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_pvscsi_pcie_device,
        NULL
    }
};
1265
/* "use_msg" (default on) enables the optional message ring. */
static const Property pvscsi_properties[] = {
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
};
1269
static void pvscsi_instance_init(Object *obj)
{
    PCIDevice *pci_dev = PCI_DEVICE(obj);

    /* Mark the device PCIe-capable; realize only adds the endpoint
     * capability when the device actually sits on an express bus. */
    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
}
1274
static void pvscsi_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    /* PCI identity and lifecycle hooks. */
    pc->vendor_id = PCI_VENDOR_ID_VMWARE;
    pc->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    pc->subsystem_id = 0x1000;
    pc->class_id = PCI_CLASS_STORAGE_SCSI;
    pc->realize = pvscsi_realizefn;
    pc->exit = pvscsi_uninit;

    /* Generic device behavior: reset, migration, user properties. */
    device_class_set_legacy_reset(dc, pvscsi_reset);
    dc->vmsd = &vmstate_pvscsi;
    device_class_set_props(dc, pvscsi_properties);
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);

    /* The device is its own hotplug handler for its SCSI bus. */
    hc->plug = pvscsi_hotplug;
    hc->unplug = pvscsi_hot_unplug;
}
1294
/*
 * QOM type registration: a hybrid conventional-PCI/PCIe device that also
 * implements the hotplug-handler interface for its own SCSI bus.
 */
static const TypeInfo pvscsi_info = {
    .name          = TYPE_PVSCSI,
    .parent        = TYPE_PCI_DEVICE,
    .class_size    = sizeof(PVSCSIClass),
    .instance_size = sizeof(PVSCSIState),
    .class_init    = pvscsi_class_init,
    .instance_init = pvscsi_instance_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
1309
/* Register the PVSCSI QOM type at QEMU startup. */
static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);
1317