/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini      <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "system/block-backend.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields after resp_iov are zeroed by virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Used for two-stage request submission and TMFs deferred to BH */
    QTAILQ_ENTRY(VirtIOSCSIReq) next;

    /* Used for cancellation of requests during TMFs. Accessed atomically. */
    int remaining;

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp an;
        VirtIOSCSIEvent event;
    } resp;
    union {
        VirtIOSCSICmdReq cmd;
        VirtIOSCSICtrlTMFReq tmf;
        VirtIOSCSICtrlANReq an;
    } req;
} VirtIOSCSIReq;

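/*
 * virtio-scsi addresses devices with an 8-byte LUN field. Roughly (see the
 * virtio-scsi specification for the authoritative layout):
 *
 *   lun[0]    = 1 (fixed)
 *   lun[1]    = target number
 *   lun[2..3] = LUN, in SAM peripheral device addressing (lun[2] == 0) or
 *               flat space addressing (0x40 <= lun[2] < 0x80); the 0x3FFF
 *               mask below strips the addressing-method bits.
 *
 * For example, target 2, LUN 5 in flat space addressing is encoded as
 * { 1, 2, 0x40, 5, 0, 0, 0, 0 } and virtio_scsi_get_lun() returns 5.
 */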
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

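/*
 * Completion takes an optional virtqueue lock: callers on the ctrl and event
 * queues pass &s->ctrl_lock or &s->event_lock because those queues can be
 * touched from multiple threads (for example, deferred TMFs complete on the
 * ctrl queue from other AioContexts), while the command path passes NULL,
 * as each command virtqueue is handled within a single AioContext.
 */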
static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);

    if (vq_lock) {
        qemu_mutex_lock(vq_lock);
    }

    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (vq_lock) {
        qemu_mutex_unlock(vq_lock);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");

    if (vq_lock) {
        qemu_mutex_lock(vq_lock);
    }

    virtqueue_detach_element(req->vq, &req->elem, 0);

    if (vq_lock) {
        qemu_mutex_unlock(vq_lock);
    }

    virtio_scsi_free_req(req);
}

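/*
 * Append the tail of an iovec to the request's DMA scatter/gather list,
 * skipping the first 'skip' bytes (the virtio-scsi header, which is not part
 * of the data payload). For example, with segments of 8 and 16 bytes and
 * skip = 8, only the 16-byte segment is added and 16 is returned.
 */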
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}

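/*
 * A request as laid out in the virtqueue element, assuming the usual driver
 * layout (VIRTIO_F_ANY_LAYOUT relaxes the buffer boundaries):
 *
 *   out_sg: [ request header (req_size bytes)  ][ data-out ... ]
 *   in_sg:  [ response header (resp_size bytes)][ data-in  ... ]
 *
 * Parsing copies the request header out, maps the response header for
 * writeback at completion time, and gathers the remaining buffers into
 * req->qsgl for DMA.
 */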
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    if (vq_lock) {
        qemu_mutex_lock(vq_lock);
    }

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);

    if (vq_lock) {
        qemu_mutex_unlock(vq_lock);
    }

    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

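/*
 * Migration of in-flight requests: only the command virtqueue index and the
 * raw virtqueue element are saved; on load the element is re-parsed, so the
 * scatter/gather list and response iovec are rebuilt rather than transferred.
 */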
static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}

typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

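/*
 * tmf->remaining counts outstanding work for a TMF: it is incremented once
 * per deferred BH and once per asynchronously cancelled request, and the TMF
 * response is sent to the guest only when the last unit of work finishes and
 * the count drops to zero.
 */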
static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
{
    if (qatomic_fetch_dec(&tmf->remaining) == 1) {
        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
                                   tmf->req.tmf.tag, tmf->resp.tmf.response);

        virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
    }
}

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    virtio_scsi_tmf_dec_remaining(n->tmf_req);
    g_free(n);
}

static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
{
    VirtIOSCSICancelNotifier *notifier;

    assert(r->ctx == qemu_get_current_aio_context());

    /* Decremented in virtio_scsi_cancel_notify() */
    qatomic_inc(&tmf->remaining);

    notifier = g_new(VirtIOSCSICancelNotifier, 1);
    notifier->notifier.notify = virtio_scsi_cancel_notify;
    notifier->tmf_req = tmf;
    scsi_req_cancel_async(r, &notifier->notifier);
}

/* Execute a TMF on the requests in the current AioContext */
static void virtio_scsi_do_tmf_aio_context(void *opaque)
{
    AioContext *ctx = qemu_get_current_aio_context();
    VirtIOSCSIReq *tmf = opaque;
    VirtIOSCSI *s = tmf->dev;
    SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
    SCSIRequest *r;
    bool match_tag;

    if (!d) {
        tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_tmf_dec_remaining(tmf);
        return;
    }

    /*
     * This function could handle other subtypes that need to be processed in
     * the request's AioContext in the future, but for now only request
     * cancellation subtypes are handled here.
     */
    switch (tmf->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
        match_tag = true;
        break;
    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
        match_tag = false;
        break;
    default:
        g_assert_not_reached();
    }

    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
        QTAILQ_FOREACH(r, &d->requests, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            assert(cmd_req); /* request has hba_private while enqueued */

            if (r->ctx != ctx) {
                continue;
            }
            if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
                continue;
            }
            virtio_scsi_tmf_cancel_req(tmf, r);
        }
    }

    /* Incremented by virtio_scsi_do_tmf() */
    virtio_scsi_tmf_dec_remaining(tmf);

    object_unref(d);
}

static void dummy_bh(void *opaque)
{
    /* Do nothing */
}

/*
 * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
 */
static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
{
    GLOBAL_STATE_CODE();

    assert(!s->dataplane_started);

    for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
        AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];

        /* Our BH only runs after previously scheduled BHs */
        aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
    }
}

/*
 * Run the TMF in a specific AioContext, handling only requests in that
 * AioContext. This is necessary because requests can run in different
 * AioContexts and it is only possible to cancel them from the AioContext
 * where they are running.
 */
static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
                                                 AioContext *ctx)
{
    /* Decremented in virtio_scsi_do_tmf_aio_context() */
    qatomic_inc(&tmf->remaining);

    /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
    aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
}

/*
 * Returns the AioContext for a given TMF's tag field or NULL. Note that the
 * request identified by the tag may have completed by the time you can execute
 * a BH in the AioContext, so don't assume the request still exists in your BH.
 */
static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
                                                VirtIOSCSIReq *tmf)
{
    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
        SCSIRequest *r;
        SCSIRequest *next;

        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;

            /* hba_private is non-NULL while the request is enqueued */
            assert(cmd_req);

            if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
                return r->ctx;
            }
        }
    }
    return NULL;
}

/* Return 0 if the request is ready to be completed and returned to the guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    AioContext *ctx;
    int ret = 0;

    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * on its fields, to avoid compiler errors from taking the address of a
     * packed member.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        ctx = find_aio_context_for_tmf_tag(d, req);
        if (ctx) {
            virtio_scsi_defer_tmf_to_aio_context(req, ctx);
            ret = -EINPROGRESS;
        }
        break;
    }

    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
            QTAILQ_FOREACH(r, &d->requests, next) {
                VirtIOSCSIReq *cmd_req = r->hba_private;
                assert(cmd_req); /* request has hba_private while enqueued */

                if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
                    /*
                     * "If the specified command is present in the task set,
                     * then return a service response set to FUNCTION
                     * SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                }
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        qatomic_inc(&s->resetting);
        device_cold_reset(&d->qdev);
        qatomic_dec(&s->resetting);
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: {
        BusChild *kid;
        int target = req->req.tmf.lun[1];
        qatomic_inc(&s->resetting);

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        qatomic_dec(&s->resetting);
        break;
    }

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
        g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);

        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        qatomic_inc(&req->remaining);

        for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
            ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];

            if (!g_hash_table_add(aio_contexts, ctx)) {
                continue; /* skip previously added AioContext */
            }

            virtio_scsi_defer_tmf_to_aio_context(req, ctx);
        }

        virtio_scsi_tmf_dec_remaining(req);
        ret = -EINPROGRESS;
        break;
    }

    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
            QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
                /* Request has hba_private while enqueued */
                assert(r->hba_private);

                /*
                 * "If there is any command present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                break;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}

static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req, &s->ctrl_lock);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req, &s->ctrl_lock);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req, &s->ctrl_lock);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req, &s->ctrl_lock);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is started by the core virtio code but virtqueue handler functions
 * can also be invoked when a guest kicks before DRIVER_OK, so this function
 * takes care of manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (s->dataplane_started) {
        return false;
    }
    if (s->vq_aio_context[0] == qemu_get_aio_context()) {
        return false; /* not using IOThreads */
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_ctrl_vq(s, vq);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);
    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req, NULL);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (qatomic_read(&req->dev->resetting)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req, NULL);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, vs->cdb_size, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    defer_call_begin();
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;
    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    defer_call_end();
    scsi_req_unref(sreq);
}

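/*
 * Command requests go through two stages: prepare parses each popped element,
 * creates the SCSIRequest, and calls defer_call_begin(); submit then starts
 * I/O and calls defer_call_end(). Pairing the whole batch inside
 * defer_call_begin()/defer_call_end() lets deferred work, such as backend
 * submission, be coalesced across a burst of requests.
 */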
static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    defer_call_end();
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_cmd_vq(s, vq);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* Merge in all virtio-scsi features supported by the host */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);

    virtio_scsi_flush_defer_tmf_to_aio_context(s);

    qatomic_inc(&s->resetting);
    bus_cold_reset(BUS(&s->bus));
    qatomic_dec(&s->resetting);

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
        s->events_dropped = false;
    }
}

typedef struct {
    uint32_t event;
    uint32_t reason;
    union {
        /* Used by messages specific to a device */
        struct {
            uint32_t id;
            uint32_t lun;
        } address;
    };
} VirtIOSCSIEventInfo;

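/*
 * Report an event on the event virtqueue. If no buffer is available, set the
 * events_dropped flag instead; the next successfully delivered event then
 * carries VIRTIO_SCSI_T_EVENTS_MISSED so the guest can tell that events were
 * lost.
 */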
static void virtio_scsi_push_event(VirtIOSCSI *s,
                                   const VirtIOSCSIEventInfo *info)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t event = info->event;
    uint32_t reason = info->reason;

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
        if (!req) {
            s->events_dropped = true;
            return;
        }

        if (s->events_dropped) {
            event |= VIRTIO_SCSI_T_EVENTS_MISSED;
            s->events_dropped = false;
        }
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req, &s->event_lock);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (event != VIRTIO_SCSI_T_EVENTS_MISSED) {
        evt->lun[0] = 1;
        evt->lun[1] = info->address.id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (info->address.lun >= 256) {
            evt->lun[2] = (info->address.lun >> 8) | 0x40;
        }
        evt->lun[3] = info->address.lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req, &s->event_lock);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    bool events_dropped;

    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
        events_dropped = s->events_dropped;
    }

    if (events_dropped) {
        VirtIOSCSIEventInfo info = {
            .event = VIRTIO_SCSI_T_NO_EVENT,
        };
        virtio_scsi_push_event(s, &info);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_event_vq(s, vq);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_PARAM_CHANGE,
            .reason  = sense.asc | (sense.ascq << 8),
            .address = {
                .id  = dev->id,
                .lun = dev->lun,
            },
        };

        virtio_scsi_push_event(s, &info);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);
    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED];
    SCSIDevice *sd = SCSI_DEVICE(dev);

    if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) {
        /*
         * Try to make the BlockBackend's AioContext match ours. Ignore failure
         * because I/O will still work although block jobs and other users
         * might be slower when multiple AioContexts use a BlockBackend.
         */
        blk_set_aio_context(sd->conf.blk, ctx, NULL);
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
            .reason  = VIRTIO_SCSI_EVT_RESET_RESCAN,
            .address = {
                .id  = sd->id,
                .lun = sd->lun,
            },
        };

        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    VirtIOSCSIEventInfo info = {
        .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
        .reason  = VIRTIO_SCSI_EVT_RESET_REMOVED,
        .address = {
            .id  = sd->id,
            .lun = sd->lun,
        },
    };

    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);

    if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) {
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_scsi_drained_begin(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane but the host notifier has
     * already been detached. Detaching multiple times is a no-op if nothing
     * else is monitoring the same file descriptor, but avoid it just in
     * case.
     *
     * Also, don't detach if dataplane has not even been started yet because
     * the host notifier isn't attached.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_scsi_drained_end(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane. Keep the host notifier detached
     * so it's not left dangling after dataplane is stopped.
     *
     * Also, don't attach if dataplane has not even been started yet. We're not
     * ready.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        AioContext *ctx = s->vq_aio_context[i];

        if (vq == vs->event_vq) {
            virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx);
        } else {
            virtio_queue_aio_attach_host_notifier(vq, ctx);
        }
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
    .drained_begin = virtio_scsi_drained_begin,
    .drained_end = virtio_scsi_drained_end,
};

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
        s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    qemu_mutex_init(&s->ctrl_lock);
    qemu_mutex_init(&s->event_lock);

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                        &virtio_scsi_scsi_info, vdev->bus_name);
    /* Override the default SCSI bus hotplug handler with virtio-scsi's own */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

/* main loop */
static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    virtio_scsi_dataplane_cleanup(s);
    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
    qemu_mutex_destroy(&s->event_lock);
    qemu_mutex_destroy(&s->ctrl_lock);
}

static const Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI,
                                         parent_obj.conf.iothread_vq_mapping_list),
};

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, const void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)