xref: /qemu/hw/scsi/scsi-bus.c (revision 21596064081e8d0c0153f68714981c7f0e040973)
1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "qemu/error-report.h"
4 #include "qemu/module.h"
5 #include "qemu/option.h"
6 #include "qemu/hw-version.h"
7 #include "hw/qdev-properties.h"
8 #include "hw/scsi/scsi.h"
9 #include "migration/qemu-file-types.h"
10 #include "migration/vmstate.h"
11 #include "scsi/constants.h"
12 #include "system/block-backend.h"
13 #include "system/blockdev.h"
14 #include "system/system.h"
15 #include "system/runstate.h"
16 #include "trace.h"
17 #include "system/dma.h"
18 #include "qemu/cutils.h"
19 
20 static char *scsibus_get_dev_path(DeviceState *dev);
21 static char *scsibus_get_fw_dev_path(DeviceState *dev);
22 static void scsi_req_dequeue(SCSIRequest *req);
23 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
24 static void scsi_target_free_buf(SCSIRequest *req);
25 static void scsi_clear_reported_luns_changed(SCSIRequest *req);
26 
27 static int next_scsi_bus;
28 
29 static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
30                                        int channel, int id, int lun,
31                                        bool include_unrealized)
32 {
33     BusChild *kid;
34     SCSIDevice *retval = NULL;
35 
36     QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
37         DeviceState *qdev = kid->child;
38         SCSIDevice *dev = SCSI_DEVICE(qdev);
39 
40         if (dev->channel == channel && dev->id == id) {
41             if (dev->lun == lun) {
42                 retval = dev;
43                 break;
44             }
45 
46             /*
47              * If we don't find an exact match (channel/id/lun),
48              * we will return the first device which matches channel/id.
49              */
50 
51             if (!retval) {
52                 retval = dev;
53             }
54         }
55     }
56 
57     /*
58      * This function might run on the IO thread and we might race against
59      * main thread hot-plugging the device.
60      * We assume that as soon as .realized is set to true we can let
61      * the user access the device.
62      */
63 
64     if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
65         retval = NULL;
66     }
67 
68     return retval;
69 }
70 
71 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
72 {
73     RCU_READ_LOCK_GUARD();
74     return do_scsi_device_find(bus, channel, id, lun, false);
75 }
76 
77 SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
78 {
79     SCSIDevice *d;
80     RCU_READ_LOCK_GUARD();
81     d = do_scsi_device_find(bus, channel, id, lun, false);
82     if (d) {
83         object_ref(d);
84     }
85     return d;
86 }
87 
88 /*
89  * Invoke @fn() for each enqueued request in device @s. Must be called from the
90  * main loop thread while the guest is stopped. This is only suitable for
91  * vmstate ->put(); use scsi_device_for_each_req_async() for other cases.
92  */
93 static void scsi_device_for_each_req_sync(SCSIDevice *s,
94                                           void (*fn)(SCSIRequest *, void *),
95                                           void *opaque)
96 {
97     SCSIRequest *req;
98     SCSIRequest *next_req;
99 
100     assert(!runstate_is_running());
101     assert(qemu_in_main_thread());
102 
103     /*
104      * Locking is not necessary because the guest is stopped and no other
105      * threads can be accessing the requests list, but take the lock for
106      * consistency.
107      */
108     WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
109         QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
110             fn(req, opaque);
111         }
112     }
113 }
114 
115 typedef struct {
116     SCSIDevice *s;
117     void (*fn)(SCSIRequest *, void *);
118     void *fn_opaque;
119 } SCSIDeviceForEachReqAsyncData;
120 
121 static void scsi_device_for_each_req_async_bh(void *opaque)
122 {
123     g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
124     SCSIDevice *s = data->s;
125     g_autoptr(GList) reqs = NULL;
126 
127     /*
128      * Build a list of requests in this AioContext so fn() can be invoked later
129      * outside requests_lock.
130      */
131     WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
132         AioContext *ctx = qemu_get_current_aio_context();
133         SCSIRequest *req;
134         SCSIRequest *next;
135 
136         QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
137             if (req->ctx == ctx) {
138                 scsi_req_ref(req); /* dropped after calling fn() */
139                 reqs = g_list_prepend(reqs, req);
140             }
141         }
142     }
143 
144     /* Call fn() on each request */
145     for (GList *elem = g_list_first(reqs); elem; elem = g_list_next(elem)) {
146         data->fn(elem->data, data->fn_opaque);
147         scsi_req_unref(elem->data);
148     }
149 
150     /* Drop the reference taken by scsi_device_for_each_req_async() */
151     object_unref(OBJECT(s));
152 
153     /* Paired with blk_inc_in_flight() in scsi_device_for_each_req_async() */
154     blk_dec_in_flight(s->conf.blk);
155 }
156 
157 static void scsi_device_for_each_req_async_do_ctx(gpointer key, gpointer value,
158                                                   gpointer user_data)
159 {
160     AioContext *ctx = key;
161     SCSIDeviceForEachReqAsyncData *params = user_data;
162     SCSIDeviceForEachReqAsyncData *data;
163 
164     data = g_new(SCSIDeviceForEachReqAsyncData, 1);
165     data->s = params->s;
166     data->fn = params->fn;
167     data->fn_opaque = params->fn_opaque;
168 
169     /*
170      * Hold a reference to the SCSIDevice until
171      * scsi_device_for_each_req_async_bh() finishes.
172      */
173     object_ref(OBJECT(data->s));
174 
175     /* Paired with scsi_device_for_each_req_async_bh() */
176     blk_inc_in_flight(data->s->conf.blk);
177 
178     aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, data);
179 }
180 
181 /*
182  * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
183  * must be thread-safe because it runs concurrently in each AioContext that is
184  * executing a request.
185  *
186  * Keeps the BlockBackend's in-flight counter incremented until everything is
187  * done, so draining it will settle all scheduled @fn() calls.
188  */
189 static void scsi_device_for_each_req_async(SCSIDevice *s,
190                                            void (*fn)(SCSIRequest *, void *),
191                                            void *opaque)
192 {
193     assert(qemu_in_main_thread());
194 
195     /* The set of AioContexts where the requests are being processed */
196     g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
197     WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
198         SCSIRequest *req;
199         QTAILQ_FOREACH(req, &s->requests, next) {
200             g_hash_table_add(aio_contexts, req->ctx);
201         }
202     }
203 
204     /* Schedule a BH for each AioContext */
205     SCSIDeviceForEachReqAsyncData params = {
206         .s = s,
207         .fn = fn,
208         .fn_opaque = opaque,
209     };
210     g_hash_table_foreach(
211             aio_contexts,
212             scsi_device_for_each_req_async_do_ctx,
213             &params
214     );
215 }
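/*
 * Hypothetical usage sketch (illustration only, not in the original source):
 * because the BlockBackend's in-flight counter stays raised until every
 * scheduled fn() call has run, a caller that needs the callbacks to have
 * settled can drain the BlockBackend afterwards:
 *
 *     scsi_device_for_each_req_async(s, my_fn, NULL);
 *     blk_drain(s->conf.blk);   // returns once every my_fn() call finished
 *
 * Here my_fn stands for an assumed caller-provided, thread-safe callback.
 */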
216 
217 static void scsi_device_realize(SCSIDevice *s, Error **errp)
218 {
219     SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
220     if (sc->realize) {
221         sc->realize(s, errp);
222     }
223 }
224 
225 static void scsi_device_unrealize(SCSIDevice *s)
226 {
227     SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
228     if (sc->unrealize) {
229         sc->unrealize(s);
230     }
231 }
232 
233 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
234                        size_t buf_len, void *hba_private)
235 {
236     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
237     int rc;
238 
239     assert(cmd->len == 0);
240     rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
241     if (bus->info->parse_cdb) {
242         rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
243     }
244     return rc;
245 }
246 
247 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
248                                           uint8_t *buf, void *hba_private)
249 {
250     SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
251     if (sc->alloc_req) {
252         return sc->alloc_req(s, tag, lun, buf, hba_private);
253     }
254 
255     return NULL;
256 }
257 
258 void scsi_device_unit_attention_reported(SCSIDevice *s)
259 {
260     SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
261     if (sc->unit_attention_reported) {
262         sc->unit_attention_reported(s);
263     }
264 }
265 
266 /* Create a scsi bus, and attach devices to it.  */
267 void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
268                          const SCSIBusInfo *info, const char *bus_name)
269 {
270     qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
271     bus->busnr = next_scsi_bus++;
272     bus->info = info;
273     qbus_set_bus_hotplug_handler(BUS(bus));
274 }
275 
276 void scsi_req_retry(SCSIRequest *req)
277 {
278     req->retry = true;
279 }
280 
281 /* Called in the AioContext that is executing the request */
282 static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
283 {
284     scsi_req_ref(req);
285     if (req->retry) {
286         req->retry = false;
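        /*
         * Data transfer commands pick up where they left off; commands
         * without a data phase are dequeued and re-issued from scratch.
         */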
287         switch (req->cmd.mode) {
288             case SCSI_XFER_FROM_DEV:
289             case SCSI_XFER_TO_DEV:
290                 scsi_req_continue(req);
291                 break;
292             case SCSI_XFER_NONE:
293                 scsi_req_dequeue(req);
294                 scsi_req_enqueue(req);
295                 break;
296         }
297     }
298     scsi_req_unref(req);
299 }
300 
301 static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
302 {
303     SCSIDevice *s = opaque;
304 
305     assert(qemu_in_main_thread());
306 
307     if (!running) {
308         return;
309     }
310 
311     scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
312 }
313 
314 static bool scsi_bus_is_address_free(SCSIBus *bus,
315                                      int channel, int target, int lun,
316                                      SCSIDevice **p_dev)
317 {
318     SCSIDevice *d;
319 
320     RCU_READ_LOCK_GUARD();
321     d = do_scsi_device_find(bus, channel, target, lun, true);
322     if (d && d->lun == lun) {
323         if (p_dev) {
324             *p_dev = d;
325         }
326         return false;
327     }
328     if (p_dev) {
329         *p_dev = NULL;
330     }
331     return true;
332 }
333 
334 static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
335 {
336     SCSIDevice *dev = SCSI_DEVICE(qdev);
337     SCSIBus *bus = SCSI_BUS(qbus);
338 
339     if (dev->channel > bus->info->max_channel) {
340         error_setg(errp, "bad scsi channel id: %d", dev->channel);
341         return false;
342     }
343     if (dev->id != -1 && dev->id > bus->info->max_target) {
344         error_setg(errp, "bad scsi device id: %d", dev->id);
345         return false;
346     }
347     if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
348         error_setg(errp, "bad scsi device lun: %d", dev->lun);
349         return false;
350     }
351 
352     if (dev->id != -1 && dev->lun != -1) {
353         SCSIDevice *d;
354         if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
355             error_setg(errp, "lun already used by '%s'", d->qdev.id);
356             return false;
357         }
358     }
359 
360     return true;
361 }
362 
363 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
364 {
365     SCSIDevice *dev = SCSI_DEVICE(qdev);
366     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
367     bool is_free;
368     Error *local_err = NULL;
369 
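    /*
     * If the user did not specify a scsi-id and/or lun, pick the first
     * free address on the bus.
     */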
370     if (dev->id == -1) {
371         int id = -1;
372         if (dev->lun == -1) {
373             dev->lun = 0;
374         }
375         do {
376             is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
377         } while (!is_free && id < bus->info->max_target);
378         if (!is_free) {
379             error_setg(errp, "no free target");
380             return;
381         }
382         dev->id = id;
383     } else if (dev->lun == -1) {
384         int lun = -1;
385         do {
386             is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
387         } while (!is_free && lun < bus->info->max_lun);
388         if (!is_free) {
389             error_setg(errp, "no free lun");
390             return;
391         }
392         dev->lun = lun;
393     }
394 
395     qemu_mutex_init(&dev->requests_lock);
396     QTAILQ_INIT(&dev->requests);
397     scsi_device_realize(dev, &local_err);
398     if (local_err) {
399         error_propagate(errp, local_err);
400         return;
401     }
402     dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
403             scsi_dma_restart_cb, NULL, dev);
404 }
405 
406 static void scsi_qdev_unrealize(DeviceState *qdev)
407 {
408     SCSIDevice *dev = SCSI_DEVICE(qdev);
409 
410     if (dev->vmsentry) {
411         qemu_del_vm_change_state_handler(dev->vmsentry);
412     }
413 
414     scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
415 
416     qemu_mutex_destroy(&dev->requests_lock);
417 
418     scsi_device_unrealize(dev);
419 
420     blockdev_mark_auto_del(dev->conf.blk);
421 }
422 
423 /* handle legacy '-drive if=scsi,...' cmd line args */
424 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
425                                       int unit, bool removable, BlockConf *conf,
426                                       const char *serial, Error **errp)
427 {
428     const char *driver;
429     char *name;
430     DeviceState *dev;
431     SCSIDevice *s;
432     DriveInfo *dinfo;
433     Error *local_err = NULL;
434 
435     if (blk_is_sg(blk)) {
436         driver = "scsi-generic";
437     } else {
438         dinfo = blk_legacy_dinfo(blk);
439         if (dinfo && dinfo->media_cd) {
440             driver = "scsi-cd";
441         } else {
442             driver = "scsi-hd";
443         }
444     }
445     dev = qdev_new(driver);
446     name = g_strdup_printf("legacy[%d]", unit);
447     object_property_add_child(OBJECT(bus), name, OBJECT(dev));
448     g_free(name);
449 
450     s = SCSI_DEVICE(dev);
451     s->conf = *conf;
452 
453     check_boot_index(conf->bootindex, &local_err);
454     if (local_err) {
455         object_unparent(OBJECT(dev));
456         error_propagate(errp, local_err);
457         return NULL;
458     }
459     add_boot_device_path(conf->bootindex, dev, NULL);
460 
461     qdev_prop_set_uint32(dev, "scsi-id", unit);
462     if (object_property_find(OBJECT(dev), "removable")) {
463         qdev_prop_set_bit(dev, "removable", removable);
464     }
465     if (serial && object_property_find(OBJECT(dev), "serial")) {
466         qdev_prop_set_string(dev, "serial", serial);
467     }
468     if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
469         object_unparent(OBJECT(dev));
470         return NULL;
471     }
472 
473     if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
474         object_unparent(OBJECT(dev));
475         return NULL;
476     }
477     return s;
478 }
479 
480 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
481 {
482     Location loc;
483     DriveInfo *dinfo;
484     int unit;
485     BlockConf conf = {
486         .bootindex = -1,
487         .share_rw = false,
488         .rerror = BLOCKDEV_ON_ERROR_AUTO,
489         .werror = BLOCKDEV_ON_ERROR_AUTO,
490     };
491 
492     loc_push_none(&loc);
493     for (unit = 0; unit <= bus->info->max_target; unit++) {
494         dinfo = drive_get(IF_SCSI, bus->busnr, unit);
495         if (dinfo == NULL) {
496             continue;
497         }
498         qemu_opts_loc_restore(dinfo->opts);
499         scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
500                                   unit, false, &conf, NULL, &error_fatal);
501     }
502     loc_pop(&loc);
503 }
504 
505 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
506 {
507     scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
508     scsi_req_complete(req, CHECK_CONDITION);
509     return 0;
510 }
511 
512 static const struct SCSIReqOps reqops_invalid_field = {
513     .size         = sizeof(SCSIRequest),
514     .send_command = scsi_invalid_field
515 };
516 
517 /* SCSIReqOps implementation for invalid commands.  */
518 
519 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
520 {
521     scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
522     scsi_req_complete(req, CHECK_CONDITION);
523     return 0;
524 }
525 
526 static const struct SCSIReqOps reqops_invalid_opcode = {
527     .size         = sizeof(SCSIRequest),
528     .send_command = scsi_invalid_command
529 };
530 
531 /* SCSIReqOps implementation for unit attention conditions.  */
532 
533 static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
534 {
535     SCSISense *ua = NULL;
536 
537     if (req->dev->unit_attention.key == UNIT_ATTENTION) {
538         ua = &req->dev->unit_attention;
539     } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
540         ua = &req->bus->unit_attention;
541     }
542 
543     /*
544      * Fetch the unit attention sense immediately so that another
545      * scsi_req_new does not use reqops_unit_attention.
546      */
547     if (ua) {
548         scsi_req_build_sense(req, *ua);
549         *ua = SENSE_CODE(NO_SENSE);
550     }
551 }
552 
553 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
554 {
555     scsi_req_complete(req, CHECK_CONDITION);
556     return 0;
557 }
558 
559 static const struct SCSIReqOps reqops_unit_attention = {
560     .size         = sizeof(SCSIRequest),
561     .init_req     = scsi_fetch_unit_attention_sense,
562     .send_command = scsi_unit_attention
563 };
564 
565 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
566    an invalid LUN.  */
567 
568 typedef struct SCSITargetReq SCSITargetReq;
569 
570 struct SCSITargetReq {
571     SCSIRequest req;
572     int len;
573     uint8_t *buf;
574     int buf_len;
575 };
576 
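/*
 * Encode a LUN in the first two bytes of an 8-byte REPORT LUNS entry:
 * the simple method for LUNs below 256, flat space addressing otherwise.
 * For example, LUN 5 is encoded as 00 05 and LUN 0x123 as 41 23.
 */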
577 static void store_lun(uint8_t *outbuf, int lun)
578 {
579     if (lun < 256) {
580         /* Simple logical unit addressing method */
581         outbuf[0] = 0;
582         outbuf[1] = lun;
583     } else {
584         /* Flat space addressing method */
585         outbuf[0] = 0x40 | (lun >> 8);
586         outbuf[1] = (lun & 255);
587     }
588 }
589 
590 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
591 {
592     BusChild *kid;
593     int channel, id;
594     uint8_t tmp[8] = {0};
595     int len = 0;
596     GByteArray *buf;
597 
598     if (r->req.cmd.xfer < 16) {
599         return false;
600     }
601     if (r->req.cmd.buf[2] > 2) {
602         return false;
603     }
604 
605     /* reserve space for 63 LUNs */
606     buf = g_byte_array_sized_new(512);
607 
608     channel = r->req.dev->channel;
609     id = r->req.dev->id;
610 
611     /* add size (will be updated later to the correct value) */
612     g_byte_array_append(buf, tmp, 8);
613     len += 8;
614 
615     /* add LUN0 */
616     g_byte_array_append(buf, tmp, 8);
617     len += 8;
618 
619     WITH_RCU_READ_LOCK_GUARD() {
620         QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
621             DeviceState *qdev = kid->child;
622             SCSIDevice *dev = SCSI_DEVICE(qdev);
623 
624             if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
625                 qdev_is_realized(&dev->qdev)) {
626                 store_lun(tmp, dev->lun);
627                 g_byte_array_append(buf, tmp, 8);
628                 len += 8;
629             }
630         }
631     }
632 
633     r->buf_len = len;
634     r->buf = g_byte_array_free(buf, FALSE);
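    /* Cap the transfer at the allocation length, rounded down to a whole
     * number of 8-byte LUN entries. */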
635     r->len = MIN(len, r->req.cmd.xfer & ~7);
636 
637     /* store the LUN list length */
638     stl_be_p(&r->buf[0], len - 8);
639 
640     /*
641      * If a REPORT LUNS command enters the enabled command state, [...]
642      * the device server shall clear any pending unit attention condition
643      * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
644      */
645     scsi_clear_reported_luns_changed(&r->req);
646 
647     return true;
648 }
649 
650 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
651 {
652     assert(r->req.dev->lun != r->req.lun);
653 
654     scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
655 
656     if (r->req.cmd.buf[1] & 0x2) {
657         /* Command support data - optional, not implemented */
658         return false;
659     }
660 
661     if (r->req.cmd.buf[1] & 0x1) {
662         /* Vital product data */
663         uint8_t page_code = r->req.cmd.buf[2];
664         r->buf[r->len++] = page_code; /* this page */
665         r->buf[r->len++] = 0x00;
666 
667         switch (page_code) {
668         case 0x00: /* Supported page codes, mandatory */
669         {
670             int pages;
671             pages = r->len++;
672             r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
673             r->buf[pages] = r->len - pages - 1; /* number of pages */
674             break;
675         }
676         default:
677             return false;
678         }
679         /* done with EVPD */
680         assert(r->len < r->buf_len);
681         r->len = MIN(r->req.cmd.xfer, r->len);
682         return true;
683     }
684 
685     /* Standard INQUIRY data */
686     if (r->req.cmd.buf[2] != 0) {
687         return false;
688     }
689 
690     /* PAGE CODE == 0 */
691     r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
692     memset(r->buf, 0, r->len);
693     if (r->req.lun != 0) {
694         r->buf[0] = TYPE_NO_LUN;
695     } else {
696         r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
697         r->buf[2] = 5; /* Version */
698         r->buf[3] = 2 | 0x10; /* HiSup, response data format */
699         r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
700         r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
701         memcpy(&r->buf[8], "QEMU    ", 8);
702         memcpy(&r->buf[16], "QEMU TARGET     ", 16);
703         pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
704     }
705     return true;
706 }
707 
708 static size_t scsi_sense_len(SCSIRequest *req)
709 {
710     if (req->dev->type == TYPE_SCANNER) {
711         return SCSI_SENSE_LEN_SCANNER;
712     }
713     return SCSI_SENSE_LEN;
714 }
715 
716 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
717 {
718     SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
719     int fixed_sense = (req->cmd.buf[1] & 1) == 0;
720 
721     if (req->lun != 0 &&
722         buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
723         scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
724         scsi_req_complete(req, CHECK_CONDITION);
725         return 0;
726     }
727     switch (buf[0]) {
728     case REPORT_LUNS:
729         if (!scsi_target_emulate_report_luns(r)) {
730             goto illegal_request;
731         }
732         break;
733     case INQUIRY:
734         if (!scsi_target_emulate_inquiry(r)) {
735             goto illegal_request;
736         }
737         break;
738     case REQUEST_SENSE:
739         scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
740         if (req->lun != 0) {
741             const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
742 
743             r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
744                                           sense, fixed_sense);
745         } else {
746             r->len = scsi_device_get_sense(r->req.dev, r->buf,
747                                            MIN(req->cmd.xfer, r->buf_len),
748                                            fixed_sense);
749         }
750         if (r->req.dev->sense_is_ua) {
751             scsi_device_unit_attention_reported(req->dev);
752             r->req.dev->sense_len = 0;
753             r->req.dev->sense_is_ua = false;
754         }
755         break;
756     case TEST_UNIT_READY:
757         break;
758     default:
759         scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
760         scsi_req_complete(req, CHECK_CONDITION);
761         return 0;
762     illegal_request:
763         scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
764         scsi_req_complete(req, CHECK_CONDITION);
765         return 0;
766     }
767 
768     if (!r->len) {
769         scsi_req_complete(req, GOOD);
770     }
771     return r->len;
772 }
773 
774 static void scsi_target_read_data(SCSIRequest *req)
775 {
776     SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
777     uint32_t n;
778 
779     n = r->len;
780     if (n > 0) {
781         r->len = 0;
782         scsi_req_data(&r->req, n);
783     } else {
784         scsi_req_complete(&r->req, GOOD);
785     }
786 }
787 
788 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
789 {
790     SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
791 
792     return r->buf;
793 }
794 
795 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
796 {
797     SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
798 
799     r->buf = g_malloc(len);
800     r->buf_len = len;
801 
802     return r->buf;
803 }
804 
805 static void scsi_target_free_buf(SCSIRequest *req)
806 {
807     SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
808 
809     g_free(r->buf);
810 }
811 
812 static const struct SCSIReqOps reqops_target_command = {
813     .size         = sizeof(SCSITargetReq),
814     .send_command = scsi_target_send_command,
815     .read_data    = scsi_target_read_data,
816     .get_buf      = scsi_target_get_buf,
817     .free_req     = scsi_target_free_buf,
818 };
819 
820 
821 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
822                             uint32_t tag, uint32_t lun, void *hba_private)
823 {
824     SCSIRequest *req;
825     SCSIBus *bus = scsi_bus_from_device(d);
826     BusState *qbus = BUS(bus);
827     const int memset_off = offsetof(SCSIRequest, sense)
828                            + sizeof(req->sense);
829 
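    /*
     * Zero only the part of the request that follows the sense buffer;
     * the fields in front of it are initialized explicitly here and in
     * scsi_req_new().
     */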
830     req = g_malloc(reqops->size);
831     memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
832     req->refcount = 1;
833     req->bus = bus;
834     req->dev = d;
835     req->tag = tag;
836     req->lun = lun;
837     req->hba_private = hba_private;
838     req->status = -1;
839     req->host_status = -1;
840     req->ops = reqops;
841     object_ref(OBJECT(d));
842     object_ref(OBJECT(qbus->parent));
843     notifier_list_init(&req->cancel_notifiers);
844 
845     if (reqops->init_req) {
846         reqops->init_req(req);
847     }
848 
849     trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
850     return req;
851 }
852 
853 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
854                           uint8_t *buf, size_t buf_len, void *hba_private)
855 {
856     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
857     const SCSIReqOps *ops;
858     SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
859     SCSIRequest *req;
860     SCSICommand cmd = { .len = 0 };
861     int ret;
862 
863     if (buf_len == 0) {
864         trace_scsi_req_parse_bad(d->id, lun, tag, 0);
865         goto invalid_opcode;
866     }
867 
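    /*
     * Pick the request ops: a pending unit attention condition takes
     * precedence unless the command is one of a few informational ones;
     * commands addressed to the target rather than to an implemented
     * logical unit (wrong LUN, REPORT LUNS, REQUEST SENSE with stored
     * sense) use the emulated target ops; everything else is handled by
     * the device itself.
     */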
868     if ((d->unit_attention.key == UNIT_ATTENTION ||
869          bus->unit_attention.key == UNIT_ATTENTION) &&
870         (buf[0] != INQUIRY &&
871          buf[0] != REPORT_LUNS &&
872          buf[0] != GET_CONFIGURATION &&
873          buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
874 
875          /*
876           * If we already have a pending unit attention condition,
877           * report this one before triggering another one.
878           */
879          !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
880         ops = &reqops_unit_attention;
881     } else if (lun != d->lun ||
882                buf[0] == REPORT_LUNS ||
883                (buf[0] == REQUEST_SENSE && d->sense_len)) {
884         ops = &reqops_target_command;
885     } else {
886         ops = NULL;
887     }
888 
889     if (ops != NULL || !sc->parse_cdb) {
890         ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
891     } else {
892         ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
893     }
894 
895     if (ret != 0) {
896         trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
897 invalid_opcode:
898         req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
899     } else {
900         assert(cmd.len != 0);
901         trace_scsi_req_parsed(d->id, lun, tag, buf[0],
902                               cmd.mode, cmd.xfer);
903         if (cmd.lba != -1) {
904             trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
905                                       cmd.lba);
906         }
907 
908         if (cmd.xfer > INT32_MAX) {
909             req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
910         } else if (ops) {
911             req = scsi_req_alloc(ops, d, tag, lun, hba_private);
912         } else {
913             req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
914         }
915     }
916 
917     req->ctx = qemu_get_current_aio_context();
918     req->cmd = cmd;
919     req->residual = req->cmd.xfer;
920 
921     switch (buf[0]) {
922     case INQUIRY:
923         trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
924         break;
925     case TEST_UNIT_READY:
926         trace_scsi_test_unit_ready(d->id, lun, tag);
927         break;
928     case REPORT_LUNS:
929         trace_scsi_report_luns(d->id, lun, tag);
930         break;
931     case REQUEST_SENSE:
932         trace_scsi_request_sense(d->id, lun, tag);
933         break;
934     default:
935         break;
936     }
937 
938     return req;
939 }
940 
941 uint8_t *scsi_req_get_buf(SCSIRequest *req)
942 {
943     return req->ops->get_buf(req);
944 }
945 
946 static void scsi_clear_reported_luns_changed(SCSIRequest *req)
947 {
948     SCSISense *ua;
949 
950     if (req->dev->unit_attention.key == UNIT_ATTENTION) {
951         ua = &req->dev->unit_attention;
952     } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
953         ua = &req->bus->unit_attention;
954     } else {
955         return;
956     }
957 
958     if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
959         ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
960         *ua = SENSE_CODE(NO_SENSE);
961     }
962 }
963 
964 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
965 {
966     int ret;
967 
968     assert(len >= 14);
969     if (!req->sense_len) {
970         return 0;
971     }
972 
973     ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
974 
975     /*
976      * FIXME: clearing unit attention conditions upon autosense should be done
977      * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
978      * (SAM-5, 5.14).
979      *
980      * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
981      * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
982      * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
983      */
984     if (req->dev->sense_is_ua) {
985         scsi_device_unit_attention_reported(req->dev);
986         req->dev->sense_len = 0;
987         req->dev->sense_is_ua = false;
988     }
989     return ret;
990 }
991 
992 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
993 {
994     return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
995 }
996 
997 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
998 {
999     trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
1000                                sense.key, sense.asc, sense.ascq);
1001     req->sense_len = scsi_build_sense(req->sense, sense);
1002 }
1003 
1004 static void scsi_req_enqueue_internal(SCSIRequest *req)
1005 {
1006     assert(!req->enqueued);
1007     scsi_req_ref(req);
1008     if (req->bus->info->get_sg_list) {
1009         req->sg = req->bus->info->get_sg_list(req);
1010     } else {
1011         req->sg = NULL;
1012     }
1013     req->enqueued = true;
1014 
1015     WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
1016         QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
1017     }
1018 }
1019 
1020 int32_t scsi_req_enqueue(SCSIRequest *req)
1021 {
1022     int32_t rc;
1023 
1024     assert(!req->retry);
1025     scsi_req_enqueue_internal(req);
1026     scsi_req_ref(req);
1027     rc = req->ops->send_command(req, req->cmd.buf);
1028     scsi_req_unref(req);
1029     return rc;
1030 }
1031 
1032 static void scsi_req_dequeue(SCSIRequest *req)
1033 {
1034     trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
1035     req->retry = false;
1036     if (req->enqueued) {
1037         WITH_QEMU_LOCK_GUARD(&req->dev->requests_lock) {
1038             QTAILQ_REMOVE(&req->dev->requests, req, next);
1039         }
1040         req->enqueued = false;
1041         scsi_req_unref(req);
1042     }
1043 }
1044 
1045 static int scsi_get_performance_length(int num_desc, int type, int data_type)
1046 {
1047     /* MMC-6, paragraph 6.7.  */
1048     switch (type) {
1049     case 0:
1050         if ((data_type & 3) == 0) {
1051             /* Each descriptor is as in Table 295 - Nominal performance.  */
1052             return 16 * num_desc + 8;
1053         } else {
1054             /* Each descriptor is as in Table 296 - Exceptions.  */
1055             return 6 * num_desc + 8;
1056         }
1057     case 1:
1058     case 4:
1059     case 5:
1060         return 8 * num_desc + 8;
1061     case 2:
1062         return 2048 * num_desc + 8;
1063     case 3:
1064         return 16 * num_desc + 8;
1065     default:
1066         return 8;
1067     }
1068 }
1069 
1070 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
1071 {
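    /*
     * Decode the BYTE_BLOCK and T_TYPE fields of the ATA PASS-THROUGH CDB:
     * the transfer length is counted in bytes, in 512-byte units, or in
     * units of the device's logical block size.
     */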
1072     int byte_block = (buf[2] >> 2) & 0x1;
1073     int type = (buf[2] >> 4) & 0x1;
1074     int xfer_unit;
1075 
1076     if (byte_block) {
1077         if (type) {
1078             xfer_unit = dev->blocksize;
1079         } else {
1080             xfer_unit = 512;
1081         }
1082     } else {
1083         xfer_unit = 1;
1084     }
1085 
1086     return xfer_unit;
1087 }
1088 
1089 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
1090 {
1091     int length = buf[2] & 0x3;
1092     int xfer;
1093     int unit = ata_passthrough_xfer_unit(dev, buf);
1094 
1095     switch (length) {
1096     case 0:
1097     case 3: /* USB-specific.  */
1098     default:
1099         xfer = 0;
1100         break;
1101     case 1:
1102         xfer = buf[3];
1103         break;
1104     case 2:
1105         xfer = buf[4];
1106         break;
1107     }
1108 
1109     return xfer * unit;
1110 }
1111 
1112 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
1113 {
1114     int extend = buf[1] & 0x1;
1115     int length = buf[2] & 0x3;
1116     int xfer;
1117     int unit = ata_passthrough_xfer_unit(dev, buf);
1118 
1119     switch (length) {
1120     case 0:
1121     case 3: /* USB-specific.  */
1122     default:
1123         xfer = 0;
1124         break;
1125     case 1:
1126         xfer = buf[4];
1127         xfer |= (extend ? buf[3] << 8 : 0);
1128         break;
1129     case 2:
1130         xfer = buf[6];
1131         xfer |= (extend ? buf[5] << 8 : 0);
1132         break;
1133     }
1134 
1135     return xfer * unit;
1136 }
1137 
1138 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1139 {
1140     cmd->xfer = scsi_cdb_xfer(buf);
1141     switch (buf[0]) {
1142     case TEST_UNIT_READY:
1143     case REWIND:
1144     case START_STOP:
1145     case SET_CAPACITY:
1146     case WRITE_FILEMARKS:
1147     case WRITE_FILEMARKS_16:
1148     case SPACE:
1149     case RESERVE:
1150     case RELEASE:
1151     case ERASE:
1152     case ALLOW_MEDIUM_REMOVAL:
1153     case SEEK_10:
1154     case SYNCHRONIZE_CACHE:
1155     case SYNCHRONIZE_CACHE_16:
1156     case LOCATE_16:
1157     case LOCK_UNLOCK_CACHE:
1158     case SET_CD_SPEED:
1159     case SET_LIMITS:
1160     case WRITE_LONG_10:
1161     case UPDATE_BLOCK:
1162     case RESERVE_TRACK:
1163     case SET_READ_AHEAD:
1164     case PRE_FETCH:
1165     case PRE_FETCH_16:
1166     case ALLOW_OVERWRITE:
1167         cmd->xfer = 0;
1168         break;
1169     case VERIFY_10:
1170     case VERIFY_12:
1171     case VERIFY_16:
1172         if ((buf[1] & 2) == 0) {
1173             cmd->xfer = 0;
1174         } else if ((buf[1] & 4) != 0) {
1175             cmd->xfer = 1;
1176         }
1177         cmd->xfer *= dev->blocksize;
1178         break;
1179     case MODE_SENSE:
1180         break;
1181     case WRITE_SAME_10:
1182     case WRITE_SAME_16:
1183         cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
1184         break;
1185     case READ_CAPACITY_10:
1186         cmd->xfer = 8;
1187         break;
1188     case READ_BLOCK_LIMITS:
1189         cmd->xfer = 6;
1190         break;
1191     case SEND_VOLUME_TAG:
1192         /* GPCMD_SET_STREAMING from multimedia commands.  */
1193         if (dev->type == TYPE_ROM) {
1194             cmd->xfer = buf[10] | (buf[9] << 8);
1195         } else {
1196             cmd->xfer = buf[9] | (buf[8] << 8);
1197         }
1198         break;
1199     case WRITE_6:
1200         /* length 0 means 256 blocks */
1201         if (cmd->xfer == 0) {
1202             cmd->xfer = 256;
1203         }
1204         /* fall through */
1205     case WRITE_10:
1206     case WRITE_VERIFY_10:
1207     case WRITE_12:
1208     case WRITE_VERIFY_12:
1209     case WRITE_16:
1210     case WRITE_VERIFY_16:
1211         cmd->xfer *= dev->blocksize;
1212         break;
1213     case READ_6:
1214     case READ_REVERSE:
1215         /* length 0 means 256 blocks */
1216         if (cmd->xfer == 0) {
1217             cmd->xfer = 256;
1218         }
1219         /* fall through */
1220     case READ_10:
1221     case READ_12:
1222     case READ_16:
1223         cmd->xfer *= dev->blocksize;
1224         break;
1225     case FORMAT_UNIT:
1226         /* MMC mandates the parameter list to be 12 bytes long.  Parameters
1227          * for block devices are restricted to the header right now.  */
1228         if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1229             cmd->xfer = 12;
1230         } else {
1231             cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1232         }
1233         break;
1234     case INQUIRY:
1235     case RECEIVE_DIAGNOSTIC:
1236     case SEND_DIAGNOSTIC:
1237         cmd->xfer = buf[4] | (buf[3] << 8);
1238         break;
1239     case READ_CD:
1240     case READ_BUFFER:
1241     case WRITE_BUFFER:
1242     case SEND_CUE_SHEET:
1243         cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1244         break;
1245     case PERSISTENT_RESERVE_OUT:
1246         cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1247         break;
1248     case ERASE_12:
1249         if (dev->type == TYPE_ROM) {
1250             /* MMC command GET PERFORMANCE.  */
1251             cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1252                                                     buf[10], buf[1] & 0x1f);
1253         }
1254         break;
1255     case MECHANISM_STATUS:
1256     case READ_DVD_STRUCTURE:
1257     case SEND_DVD_STRUCTURE:
1258     case MAINTENANCE_OUT:
1259     case MAINTENANCE_IN:
1260         if (dev->type == TYPE_ROM) {
1261             /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1262             cmd->xfer = buf[9] | (buf[8] << 8);
1263         }
1264         break;
1265     case ATA_PASSTHROUGH_12:
1266         if (dev->type == TYPE_ROM) {
1267             /* BLANK command of MMC */
1268             cmd->xfer = 0;
1269         } else {
1270             cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1271         }
1272         break;
1273     case ATA_PASSTHROUGH_16:
1274         cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1275         break;
1276     }
1277     return 0;
1278 }
1279 
1280 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1281 {
1282     switch (buf[0]) {
1283     /* stream commands */
1284     case ERASE_12:
1285     case ERASE_16:
1286         cmd->xfer = 0;
1287         break;
1288     case READ_6:
1289     case READ_REVERSE:
1290     case RECOVER_BUFFERED_DATA:
1291     case WRITE_6:
1292         cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1293         if (buf[1] & 0x01) { /* fixed */
1294             cmd->xfer *= dev->blocksize;
1295         }
1296         break;
1297     case READ_16:
1298     case READ_REVERSE_16:
1299     case VERIFY_16:
1300     case WRITE_16:
1301         cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1302         if (buf[1] & 0x01) { /* fixed */
1303             cmd->xfer *= dev->blocksize;
1304         }
1305         break;
1306     case REWIND:
1307     case LOAD_UNLOAD:
1308         cmd->xfer = 0;
1309         break;
1310     case SPACE_16:
1311         cmd->xfer = buf[13] | (buf[12] << 8);
1312         break;
1313     case READ_POSITION:
1314         switch (buf[1] & 0x1f) /* service action */ {
1315         case SHORT_FORM_BLOCK_ID:
1316         case SHORT_FORM_VENDOR_SPECIFIC:
1317             cmd->xfer = 20;
1318             break;
1319         case LONG_FORM:
1320             cmd->xfer = 32;
1321             break;
1322         case EXTENDED_FORM:
1323             cmd->xfer = buf[8] | (buf[7] << 8);
1324             break;
1325         default:
1326             return -1;
1327         }
1328 
1329         break;
1330     case FORMAT_UNIT:
1331         cmd->xfer = buf[4] | (buf[3] << 8);
1332         break;
1333     /* generic commands */
1334     default:
1335         return scsi_req_xfer(cmd, dev, buf);
1336     }
1337     return 0;
1338 }
1339 
1340 static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1341 {
1342     switch (buf[0]) {
1343     /* medium changer commands */
1344     case EXCHANGE_MEDIUM:
1345     case INITIALIZE_ELEMENT_STATUS:
1346     case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1347     case MOVE_MEDIUM:
1348     case POSITION_TO_ELEMENT:
1349         cmd->xfer = 0;
1350         break;
1351     case READ_ELEMENT_STATUS:
1352         cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1353         break;
1354 
1355     /* generic commands */
1356     default:
1357         return scsi_req_xfer(cmd, dev, buf);
1358     }
1359     return 0;
1360 }
1361 
1362 static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1363 {
1364     switch (buf[0]) {
1365     /* Scanner commands */
1366     case OBJECT_POSITION:
1367         cmd->xfer = 0;
1368         break;
1369     case SCAN:
1370         cmd->xfer = buf[4];
1371         break;
1372     case READ_10:
1373     case SEND:
1374     case GET_WINDOW:
1375     case SET_WINDOW:
1376         cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1377         break;
1378     default:
1379         /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1380         return scsi_req_xfer(cmd, dev, buf);
1381     }
1382 
1383     return 0;
1384 }
1385 
1386 static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1387 {
1388     if (!cmd->xfer) {
1389         cmd->mode = SCSI_XFER_NONE;
1390         return;
1391     }
1392     switch (cmd->buf[0]) {
1393     case WRITE_6:
1394     case WRITE_10:
1395     case WRITE_VERIFY_10:
1396     case WRITE_12:
1397     case WRITE_VERIFY_12:
1398     case WRITE_16:
1399     case WRITE_VERIFY_16:
1400     case VERIFY_10:
1401     case VERIFY_12:
1402     case VERIFY_16:
1403     case COPY:
1404     case COPY_VERIFY:
1405     case COMPARE:
1406     case CHANGE_DEFINITION:
1407     case LOG_SELECT:
1408     case MODE_SELECT:
1409     case MODE_SELECT_10:
1410     case SEND_DIAGNOSTIC:
1411     case WRITE_BUFFER:
1412     case FORMAT_UNIT:
1413     case REASSIGN_BLOCKS:
1414     case SEARCH_EQUAL:
1415     case SEARCH_HIGH:
1416     case SEARCH_LOW:
1417     case UPDATE_BLOCK:
1418     case WRITE_LONG_10:
1419     case WRITE_SAME_10:
1420     case WRITE_SAME_16:
1421     case UNMAP:
1422     case SEARCH_HIGH_12:
1423     case SEARCH_EQUAL_12:
1424     case SEARCH_LOW_12:
1425     case MEDIUM_SCAN:
1426     case SEND_VOLUME_TAG:
1427     case SEND_CUE_SHEET:
1428     case SEND_DVD_STRUCTURE:
1429     case PERSISTENT_RESERVE_OUT:
1430     case MAINTENANCE_OUT:
1431     case SET_WINDOW:
1432     case SCAN:
1433         /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
1434          * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1435          */
1436         cmd->mode = SCSI_XFER_TO_DEV;
1437         break;
1438     case ATA_PASSTHROUGH_12:
1439     case ATA_PASSTHROUGH_16:
1440         /* T_DIR */
1441         cmd->mode = (cmd->buf[2] & 0x8) ?
1442                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1443         break;
1444     default:
1445         cmd->mode = SCSI_XFER_FROM_DEV;
1446         break;
1447     }
1448 }
1449 
1450 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
1451                        size_t buf_len)
1452 {
1453     int rc;
1454     int len;
1455 
1456     cmd->lba = -1;
1457     len = scsi_cdb_length(buf);
1458     if (len < 0 || len > buf_len) {
1459         return -1;
1460     }
1461 
1462     cmd->len = len;
1463     switch (dev->type) {
1464     case TYPE_TAPE:
1465         rc = scsi_req_stream_xfer(cmd, dev, buf);
1466         break;
1467     case TYPE_MEDIUM_CHANGER:
1468         rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1469         break;
1470     case TYPE_SCANNER:
1471         rc = scsi_req_scanner_length(cmd, dev, buf);
1472         break;
1473     default:
1474         rc = scsi_req_xfer(cmd, dev, buf);
1475         break;
1476     }
1477 
1478     if (rc != 0) {
1479         return rc;
1480     }
1481     memcpy(cmd->buf, buf, cmd->len);
1482     scsi_cmd_xfer_mode(cmd);
1483     cmd->lba = scsi_cmd_lba(cmd);
1484     return 0;
1485 }
1486 
1487 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1488 {
1489     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1490 
1491     scsi_device_set_ua(dev, sense);
1492     if (bus->info->change) {
1493         bus->info->change(bus, dev, sense);
1494     }
1495 }
1496 
1497 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1498 {
1499     assert(req->refcount > 0);
1500     req->refcount++;
1501     return req;
1502 }
1503 
1504 void scsi_req_unref(SCSIRequest *req)
1505 {
1506     assert(req->refcount > 0);
1507     if (--req->refcount == 0) {
1508         BusState *qbus = req->dev->qdev.parent_bus;
1509         SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1510 
1511         if (bus->info->free_request && req->hba_private) {
1512             bus->info->free_request(bus, req->hba_private);
1513         }
1514         if (req->ops->free_req) {
1515             req->ops->free_req(req);
1516         }
1517         object_unref(OBJECT(req->dev));
1518         object_unref(OBJECT(qbus->parent));
1519         g_free(req);
1520     }
1521 }
1522 
1523 /* Tell the device that we finished processing this chunk of I/O.  It
1524    will start the next chunk or complete the command.  */
1525 void scsi_req_continue(SCSIRequest *req)
1526 {
1527     if (req->io_canceled) {
1528         trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1529         return;
1530     }
1531     trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1532     if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1533         req->ops->write_data(req);
1534     } else {
1535         req->ops->read_data(req);
1536     }
1537 }
1538 
1539 /* Called by the devices when data is ready for the HBA.  The HBA should
1540    start a DMA operation to read or fill the device's data buffer.
1541    Once it completes, calling scsi_req_continue will restart I/O.  */
1542 void scsi_req_data(SCSIRequest *req, int len)
1543 {
1544     uint8_t *buf;
1545     if (req->io_canceled) {
1546         trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1547         return;
1548     }
1549     trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1550     assert(req->cmd.mode != SCSI_XFER_NONE);
1551     if (!req->sg) {
1552         req->residual -= len;
1553         req->bus->info->transfer_data(req, len);
1554         return;
1555     }
1556 
1557     /* If the device calls scsi_req_data and the HBA specified a
1558      * scatter/gather list, the transfer has to happen in a single
1559      * step.  */
1560     assert(!req->dma_started);
1561     req->dma_started = true;
1562 
1563     buf = scsi_req_get_buf(req);
1564     if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1565         dma_buf_read(buf, len, &req->residual, req->sg,
1566                      MEMTXATTRS_UNSPECIFIED);
1567     } else {
1568         dma_buf_write(buf, len, &req->residual, req->sg,
1569                       MEMTXATTRS_UNSPECIFIED);
1570     }
1571     scsi_req_continue(req);
1572 }
1573 
1574 void scsi_req_print(SCSIRequest *req)
1575 {
1576     FILE *fp = stderr;
1577     int i;
1578 
1579     fprintf(fp, "[%s id=%d] %s",
1580             req->dev->qdev.parent_bus->name,
1581             req->dev->id,
1582             scsi_command_name(req->cmd.buf[0]));
1583     for (i = 1; i < req->cmd.len; i++) {
1584         fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1585     }
1586     switch (req->cmd.mode) {
1587     case SCSI_XFER_NONE:
1588         fprintf(fp, " - none\n");
1589         break;
1590     case SCSI_XFER_FROM_DEV:
1591         fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1592         break;
1593     case SCSI_XFER_TO_DEV:
1594         fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1595         break;
1596     default:
1597         fprintf(fp, " - Oops\n");
1598         break;
1599     }
1600 }
1601 
1602 void scsi_req_complete_failed(SCSIRequest *req, int host_status)
1603 {
1604     SCSISense sense;
1605     int status;
1606 
1607     assert(req->status == -1 && req->host_status == -1);
1608     assert(req->ops != &reqops_unit_attention);
1609 
1610     if (!req->bus->info->fail) {
1611         status = scsi_sense_from_host_status(req->host_status, &sense);
1612         if (status == CHECK_CONDITION) {
1613             scsi_req_build_sense(req, sense);
1614         }
1615         scsi_req_complete(req, status);
1616         return;
1617     }
1618 
1619     req->host_status = host_status;
1620     scsi_req_ref(req);
1621     scsi_req_dequeue(req);
1622     req->bus->info->fail(req);
1623 
1624     /* Cancelled requests might end up being completed instead of cancelled */
1625     notifier_list_notify(&req->cancel_notifiers, req);
1626     scsi_req_unref(req);
1627 }
1628 
1629 void scsi_req_complete(SCSIRequest *req, int status)
1630 {
1631     assert(req->status == -1 && req->host_status == -1);
1632     req->status = status;
1633     req->host_status = SCSI_HOST_OK;
1634 
1635     assert(req->sense_len <= sizeof(req->sense));
1636     if (status == GOOD) {
1637         req->sense_len = 0;
1638     }
1639 
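    /*
     * Save the sense data in the device so that a subsequent REQUEST SENSE
     * command can retrieve it.
     */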
1640     if (req->sense_len) {
1641         memcpy(req->dev->sense, req->sense, req->sense_len);
1642         req->dev->sense_len = req->sense_len;
1643         req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1644     } else {
1645         req->dev->sense_len = 0;
1646         req->dev->sense_is_ua = false;
1647     }
1648 
1649     scsi_req_ref(req);
1650     scsi_req_dequeue(req);
1651     req->bus->info->complete(req, req->residual);
1652 
1653     /* Cancelled requests might end up being completed instead of cancelled */
1654     notifier_list_notify(&req->cancel_notifiers, req);
1655     scsi_req_unref(req);
1656 }
1657 
1658 /* Called by the devices when the request is canceled. */
1659 void scsi_req_cancel_complete(SCSIRequest *req)
1660 {
1661     assert(req->io_canceled);
1662     if (req->bus->info->cancel) {
1663         req->bus->info->cancel(req);
1664     }
1665     notifier_list_notify(&req->cancel_notifiers, req);
1666     scsi_req_unref(req);
1667 }
1668 
1669 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1670  * notifier list; the bus will be notified once the request's cancellation
1671  * is completed.
1672  */
1673 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1674 {
1675     trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1676     if (notifier) {
1677         notifier_list_add(&req->cancel_notifiers, notifier);
1678     }
1679     if (req->io_canceled) {
1680         /* A blk_aio_cancel_async is pending; when it finishes,
1681          * scsi_req_cancel_complete will be called and will
1682          * call the notifier we just added.  Just wait for that.
1683          */
1684         assert(req->aiocb);
1685         return;
1686     }
1687     /* Dropped in scsi_req_cancel_complete.  */
1688     scsi_req_ref(req);
1689     scsi_req_dequeue(req);
1690     req->io_canceled = true;
1691     if (req->aiocb) {
1692         blk_aio_cancel_async(req->aiocb);
1693     } else {
1694         scsi_req_cancel_complete(req);
1695     }
1696 }
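
/*
 * Illustrative sketch (hypothetical types and names, not part of this file):
 * an HBA that must know when a cancellation has settled can embed a Notifier
 * in its per-request state; the callback fires from scsi_req_cancel_complete()
 * or scsi_req_complete():
 *
 *     typedef struct ExampleHBARequest {
 *         SCSIRequest *req;
 *         Notifier cancel_notifier;
 *     } ExampleHBARequest;
 *
 *     static void example_cancel_done(Notifier *notifier, void *data)
 *     {
 *         ExampleHBARequest *r = container_of(notifier, ExampleHBARequest,
 *                                             cancel_notifier);
 *         g_free(r);
 *     }
 *
 *     r->cancel_notifier.notify = example_cancel_done;
 *     scsi_req_cancel_async(r->req, &r->cancel_notifier);
 */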
1697 
1698 void scsi_req_cancel(SCSIRequest *req)
1699 {
1700     trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1701     if (!req->enqueued) {
1702         return;
1703     }
1704     assert(!req->io_canceled);
1705     /* Dropped in scsi_req_cancel_complete.  */
1706     scsi_req_ref(req);
1707     scsi_req_dequeue(req);
1708     req->io_canceled = true;
1709     if (req->aiocb) {
1710         blk_aio_cancel(req->aiocb);
1711     } else {
1712         scsi_req_cancel_complete(req);
1713     }
1714 }
1715 
1716 static int scsi_ua_precedence(SCSISense sense)
1717 {
1718     if (sense.key != UNIT_ATTENTION) {
1719         return INT_MAX;
1720     }
1721     if (sense.asc == 0x29 && sense.ascq == 0x04) {
1722         /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1723         return 1;
1724     } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1725         /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1726         return 2;
1727     } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1728         /* These two go with "all others". */
1729         ;
1730     } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1731         /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1732          * POWER ON OCCURRED = 1
1733          * SCSI BUS RESET OCCURRED = 2
1734          * BUS DEVICE RESET FUNCTION OCCURRED = 3
1735          * I_T NEXUS LOSS OCCURRED = 7
1736          */
1737         return sense.ascq;
1738     } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1739         /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
1740         return 8;
1741     }
1742     return (sense.asc << 8) | sense.ascq;
1743 }
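
/*
 * Worked example of the ordering above: POWER ON OCCURRED (asc 0x29,
 * ascq 0x01) maps to precedence 1 and SCSI BUS RESET OCCURRED (0x29, 0x02)
 * to 2, while an "ordinary" unit attention such as REPORTED LUNS DATA HAS
 * CHANGED (asc 0x3F, ascq 0x0E) falls through to (0x3F << 8) | 0x0E = 0x3F0E.
 * Lower values win in scsi_bus_set_ua()/scsi_device_set_ua() below, so a
 * pending reset-style condition is never overwritten by a less important one.
 */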
1744 
1745 void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
1746 {
1747     int prec1, prec2;
1748     if (sense.key != UNIT_ATTENTION) {
1749         return;
1750     }
1751 
1752     /*
1753      * Override a pre-existing unit attention condition, except for a more
1754      * important reset condition.
1755      */
1756     prec1 = scsi_ua_precedence(bus->unit_attention);
1757     prec2 = scsi_ua_precedence(sense);
1758     if (prec2 < prec1) {
1759         bus->unit_attention = sense;
1760     }
1761 }
1762 
1763 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1764 {
1765     int prec1, prec2;
1766     if (sense.key != UNIT_ATTENTION) {
1767         return;
1768     }
1769     trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1770                              sense.asc, sense.ascq);
1771 
1772     /*
1773      * Override a pre-existing unit attention condition, except for a more
1774      * important reset condition.
1775      */
1776     prec1 = scsi_ua_precedence(sdev->unit_attention);
1777     prec2 = scsi_ua_precedence(sense);
1778     if (prec2 < prec1) {
1779         sdev->unit_attention = sense;
1780     }
1781 }
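
/*
 * Illustrative usage (sketch, mirroring how callers elsewhere in QEMU's SCSI
 * emulation use these helpers): report a unit attention on a single device
 * after it was reset, or on the whole bus after the set of LUNs changed:
 *
 *     scsi_device_set_ua(sdev, SENSE_CODE(RESET));
 *     scsi_bus_set_ua(bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
 */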
1782 
1783 static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
1784 {
1785     scsi_req_cancel_async(req, NULL);
1786 }
1787 
1788 /**
1789  * Cancel all requests, and block until they are deleted.
1790  */
1791 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1792 {
1793     scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);
1794 
1795     /*
1796      * Await all the scsi_device_purge_one_req() calls scheduled by
1797      * scsi_device_for_each_req_async(), as well as all I/O requests that
1798      * were cancelled this way but may still take a bit of time to settle.
1799      */
1800     blk_drain(sdev->conf.blk);
1801 
1802     scsi_device_set_ua(sdev, sense);
1803 }
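
/*
 * Sketch of a typical caller (hedged example, modeled on device reset handling
 * in QEMU's SCSI device models): drop everything in flight and raise a
 * "reset occurred" unit attention in one step:
 *
 *     scsi_device_purge_requests(sdev, SENSE_CODE(RESET));
 */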
1804 
1805 void scsi_device_drained_begin(SCSIDevice *sdev)
1806 {
1807     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1808     if (!bus) {
1809         return;
1810     }
1811 
1812     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1813     assert(bus->drain_count < INT_MAX);
1814 
1815     /*
1816      * Multiple BlockBackends can be on a SCSIBus and each may begin/end
1817      * draining at any time. Keep a counter so HBAs only see begin/end once.
1818      */
1819     if (bus->drain_count++ == 0) {
1820         trace_scsi_bus_drained_begin(bus, sdev);
1821         if (bus->info->drained_begin) {
1822             bus->info->drained_begin(bus);
1823         }
1824     }
1825 }
1826 
1827 void scsi_device_drained_end(SCSIDevice *sdev)
1828 {
1829     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
1830     if (!bus) {
1831         return;
1832     }
1833 
1834     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
1835     assert(bus->drain_count > 0);
1836 
1837     if (bus->drain_count-- == 1) {
1838         trace_scsi_bus_drained_end(bus, sdev);
1839         if (bus->info->drained_end) {
1840             bus->info->drained_end(bus);
1841         }
1842     }
1843 }
1844 
1845 static char *scsibus_get_dev_path(DeviceState *dev)
1846 {
1847     SCSIDevice *d = SCSI_DEVICE(dev);
1848     DeviceState *hba = dev->parent_bus->parent;
1849     char *id;
1850     char *path;
1851 
1852     id = qdev_get_dev_path(hba);
1853     if (id) {
1854         path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1855     } else {
1856         path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1857     }
1858     g_free(id);
1859     return path;
1860 }
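
/*
 * The resulting path is the HBA's own qdev path (when it has one) followed by
 * channel:id:lun, e.g. something like "0000:00:04.0/0:0:1" for a disk behind
 * a PCI HBA, or just "0:0:1" when the HBA has no path of its own.
 */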
1861 
1862 static char *scsibus_get_fw_dev_path(DeviceState *dev)
1863 {
1864     SCSIDevice *d = SCSI_DEVICE(dev);
1865     return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1866                            qdev_fw_name(dev), d->id, d->lun);
1867 }
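
/*
 * The firmware path combines the channel, the device's fw_name and its id/lun
 * in hex, e.g. something like "channel@0/disk@1,0" for a device at id 1,
 * lun 0 (assuming the device class uses "disk" as its fw_name).
 */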
1868 
1869 /* SCSI request list.  For simplicity, pv points to the whole device */
1870 
1871 static void put_scsi_req(SCSIRequest *req, void *opaque)
1872 {
1873     QEMUFile *f = opaque;
1874 
1875     assert(!req->io_canceled);
1876     assert(req->status == -1 && req->host_status == -1);
1877     assert(req->enqueued);
1878 
1879     qemu_put_sbyte(f, req->retry ? 1 : 2);
1880     qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1881     qemu_put_be32s(f, &req->tag);
1882     qemu_put_be32s(f, &req->lun);
1883     if (req->bus->info->save_request) {
1884         req->bus->info->save_request(f, req);
1885     }
1886     if (req->ops->save_request) {
1887         req->ops->save_request(f, req);
1888     }
1889 }
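
/*
 * Each request is serialized as: a marker sbyte (1 = retry, 2 = no retry;
 * put_scsi_requests() terminates the list with 0), the fixed-size CDB buffer,
 * the 32-bit tag and lun, and finally any HBA- and device-specific state
 * written by the save_request callbacks. get_scsi_requests() below consumes
 * exactly this layout.
 */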
1890 
1891 static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1892                              const VMStateField *field, JSONWriter *vmdesc)
1893 {
1894     SCSIDevice *s = pv;
1895 
1896     scsi_device_for_each_req_sync(s, put_scsi_req, f);
1897     qemu_put_sbyte(f, 0);
1898     return 0;
1899 }
1900 
1901 static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1902                              const VMStateField *field)
1903 {
1904     SCSIDevice *s = pv;
1905     SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1906     int8_t sbyte;
1907 
1908     while ((sbyte = qemu_get_sbyte(f)) > 0) {
1909         uint8_t buf[SCSI_CMD_BUF_SIZE];
1910         uint32_t tag;
1911         uint32_t lun;
1912         SCSIRequest *req;
1913 
1914         qemu_get_buffer(f, buf, sizeof(buf));
1915         qemu_get_be32s(f, &tag);
1916         qemu_get_be32s(f, &lun);
1917         /*
1918          * A too-short CDB would have been rejected by scsi_req_new, so just use
1919          * SCSI_CMD_BUF_SIZE as the CDB length.
1920          */
1921         req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
1922         req->retry = (sbyte == 1);
1923         if (bus->info->load_request) {
1924             req->hba_private = bus->info->load_request(f, req);
1925         }
1926         if (req->ops->load_request) {
1927             req->ops->load_request(f, req);
1928         }
1929 
1930         /* Just restart it later.  */
1931         scsi_req_enqueue_internal(req);
1932 
1933         /* At this point, the request will be kept alive by the reference
1934          * added by scsi_req_enqueue_internal, so we can release our reference.
1935          * The HBA of course will add its own reference in the load_request
1936          * callback if it needs to hold on to the SCSIRequest.
1937          */
1938         scsi_req_unref(req);
1939     }
1940 
1941     return 0;
1942 }
1943 
1944 static const VMStateInfo vmstate_info_scsi_requests = {
1945     .name = "scsi-requests",
1946     .get  = get_scsi_requests,
1947     .put  = put_scsi_requests,
1948 };
1949 
1950 static bool scsi_sense_state_needed(void *opaque)
1951 {
1952     SCSIDevice *s = opaque;
1953 
1954     return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1955 }
1956 
1957 static const VMStateDescription vmstate_scsi_sense_state = {
1958     .name = "SCSIDevice/sense",
1959     .version_id = 1,
1960     .minimum_version_id = 1,
1961     .needed = scsi_sense_state_needed,
1962     .fields = (const VMStateField[]) {
1963         VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1964                                 SCSI_SENSE_BUF_SIZE_OLD,
1965                                 SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1966         VMSTATE_END_OF_LIST()
1967     }
1968 };
1969 
1970 const VMStateDescription vmstate_scsi_device = {
1971     .name = "SCSIDevice",
1972     .version_id = 1,
1973     .minimum_version_id = 1,
1974     .fields = (const VMStateField[]) {
1975         VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1976         VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1977         VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1978         VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1979         VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1980         VMSTATE_UINT32(sense_len, SCSIDevice),
1981         {
1982             .name         = "requests",
1983             .version_id   = 0,
1984             .field_exists = NULL,
1985             .size         = 0,   /* ouch */
1986             .info         = &vmstate_info_scsi_requests,
1987             .flags        = VMS_SINGLE,
1988             .offset       = 0,
1989         },
1990         VMSTATE_END_OF_LIST()
1991     },
1992     .subsections = (const VMStateDescription * const []) {
1993         &vmstate_scsi_sense_state,
1994         NULL
1995     }
1996 };
1997 
1998 static const Property scsi_props[] = {
1999     DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
2000     DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
2001     DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
2002 };
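
/*
 * These properties map to -device options that pin a device to a fixed
 * address on the bus, e.g. (illustrative command line):
 *
 *     -device virtio-scsi-pci,id=scsi0 \
 *     -device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive0
 */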
2003 
2004 static void scsi_device_class_init(ObjectClass *klass, const void *data)
2005 {
2006     DeviceClass *k = DEVICE_CLASS(klass);
2007     set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
2008     k->bus_type  = TYPE_SCSI_BUS;
2009     k->realize   = scsi_qdev_realize;
2010     k->unrealize = scsi_qdev_unrealize;
2011     device_class_set_props(k, scsi_props);
2012 }
2013 
2014 static void scsi_dev_instance_init(Object *obj)
2015 {
2016     SCSIDevice *s = SCSI_DEVICE(obj);
2017 
2018     device_add_bootindex_property(obj, &s->conf.bootindex,
2019                                   "bootindex", NULL,
2020                                   &s->qdev);
2021 }
2022 
2023 static const TypeInfo scsi_device_type_info = {
2024     .name = TYPE_SCSI_DEVICE,
2025     .parent = TYPE_DEVICE,
2026     .instance_size = sizeof(SCSIDevice),
2027     .abstract = true,
2028     .class_size = sizeof(SCSIDeviceClass),
2029     .class_init = scsi_device_class_init,
2030     .instance_init = scsi_dev_instance_init,
2031 };
2032 
2033 static void scsi_bus_class_init(ObjectClass *klass, const void *data)
2034 {
2035     BusClass *k = BUS_CLASS(klass);
2036     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
2037 
2038     k->get_dev_path = scsibus_get_dev_path;
2039     k->get_fw_dev_path = scsibus_get_fw_dev_path;
2040     k->check_address = scsi_bus_check_address;
2041     hc->unplug = qdev_simple_device_unplug_cb;
2042 }
2043 
2044 static const TypeInfo scsi_bus_info = {
2045     .name = TYPE_SCSI_BUS,
2046     .parent = TYPE_BUS,
2047     .instance_size = sizeof(SCSIBus),
2048     .class_init = scsi_bus_class_init,
2049     .interfaces = (const InterfaceInfo[]) {
2050         { TYPE_HOTPLUG_HANDLER },
2051         { }
2052     }
2053 };
2054 
2055 static void scsi_register_types(void)
2056 {
2057     type_register_static(&scsi_bus_info);
2058     type_register_static(&scsi_device_type_info);
2059 }
2060 
2061 type_init(scsi_register_types)
2062