xref: /qemu/hw/block/virtio-blk.c (revision 70ce076fa6dff60585c229a4b641b13e64bf03cf)
/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "system/blockdev.h"
#include "system/block-ram-registrar.h"
#include "system/system.h"
#include "system/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);

static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (qemu_in_iothread()) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

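/*
 * Handle a failed read/write according to the configured rerror/werror
 * policy.  BLOCK_ERROR_ACTION_STOP parks the request on s->rq so it can be
 * retried after the VM resumes; BLOCK_ERROR_ACTION_REPORT completes it with
 * VIRTIO_BLK_S_IOERR.  Returns nonzero unless the error is ignored, in which
 * case the caller completes the request as if it had succeeded.
 */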
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        g_free(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc != -1, req->qiov is a local copy of the original,
             * external iovec. It was allocated in submit_requests() so that
             * requests could be merged. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        g_free(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    g_free(req);
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    g_free(req);
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;
    struct virtio_scsi_inhdr *scsi;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also require at least the virtio_blk_inhdr, the virtio_scsi_inhdr,
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     *
     * Just put anything nonzero so that the ioctl fails in the guest.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
    virtio_stl_p(vdev, &scsi->errors, 255);
    status = VIRTIO_BLK_S_UNSUPP;

fail:
    virtio_blk_req_complete(req, status);
    g_free(req);
}

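/*
 * Submit num_reqs merged requests starting at mrb->reqs[start] as a single
 * I/O to the BlockBackend.  When more than one request is merged, a local
 * qiov covering all of them is built and the requests are chained via
 * mr_next so that virtio_blk_rw_complete() can complete each one.
 */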
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here. Initialize a local copy and then add the
         * external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

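/*
 * Sort the queued requests by sector number and submit them, merging
 * sequential requests into as few backend I/Os as the iov and transfer
 * length limits of the backend allow.  On return mrb is empty again.
 */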
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

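/*
 * Validate a guest-supplied sector range: the request must not exceed
 * BDRV_REQUEST_MAX_SECTORS, must be aligned to the logical block size, and
 * must fit within the device's capacity.
 */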
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * ensures that "num_sectors << BDRV_SECTOR_BITS" fits in the int
     * variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

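/*
 * Per-request context for asynchronous zoned commands.  The union keeps the
 * zone-report state (descriptor array to fill in) and the zone-append state
 * (offset that the backend updates with the written position) in one
 * allocation.
 */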
typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;

/*
 * check_zoned_request: error checking before issuing requests.
 * Returns true if all checks pass.
 * append: true if this is a zone append request.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status)
{
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}

static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;
    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
        i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                    >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                    >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                    >> BDRV_SECTOR_BITS),
        };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: this loop is O(n^2) overall because iov_from_buf() walks the
         * iovec from the start on every call. Optimizations required. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}

static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                         struct iovec *in_iov,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;
    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    g_free(req);
}

static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* A zoned device's last zone is allowed to be smaller. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1ull);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    return err_status;
}

static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided an input buffer smaller than "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    g_free(data);
}

static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);
    ZoneCmdData *data;

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    return err_status;
}

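/*
 * Parse and dispatch one request popped from a virtqueue.  Returns 0 on
 * success and -1 if the device should be marked broken (virtio_error() has
 * already been called).
 *
 * Per the virtio spec, a request is laid out as:
 *
 *   out: struct virtio_blk_outhdr { le32 type; le32 ioprio; le64 sector; }
 *   out/in: data buffers (device-readable for writes, writable for reads)
 *   in:  struct virtio_blk_inhdr { u8 status; }
 *
 * The out header is stripped from the front of out_iov and the in header
 * from the back of in_iov before the data buffers are handed to the
 * type-specific handlers below.
 */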
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is.  */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if it
     * was not negotiated, we ignored it in the past, so keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            g_free(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        g_free(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * We pass out_iov/out_num and in_iov/in_num here because it is not
         * safe to access req->elem.out_sg directly: it may have been
         * modified by virtio_blk_handle_request().
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set. The switch statement masks this flag,
     * so the case labels must mask it as well; below we check whether the
     * request actually had it set.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request payload
         * is larger than a single discard/write-zeroes segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            g_free(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            g_free(req);
        }

        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        g_free(req);
    }
    return 0;
}

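/*
 * Process all available requests on a virtqueue.  Virtqueue kick
 * notifications are disabled while the ring is drained to avoid needless
 * vmexits, and requests are handled inside a defer_call_begin()/
 * defer_call_end() section so the block layer can batch submissions.
 */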
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                g_free(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * ioeventfd here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->ioeventfd_disabled) {
            return;
        }
    }

    virtio_blk_handle_vq(s, vq);
}

static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev; /* we're called with at least one request */

    MultiReqBuffer mrb = {};

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                g_free(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);
}

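/*
 * vm_change_state handler: when the VM resumes, requeue the requests that
 * were parked on s->rq by the rerror/werror=stop policy.  The list is split
 * per virtqueue so that each batch can be resubmitted from a BH in its own
 * AioContext.
 */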
static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;
    uint16_t num_queues = s->conf.num_queues;
    g_autofree VirtIOBlockReq **vq_rq = NULL;
    VirtIOBlockReq *rq = NULL;

    if (!running) {
        return;
    }

    /* Split the device-wide s->rq request list into per-vq request lists */
    vq_rq = g_new0(VirtIOBlockReq *, num_queues);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        rq = s->rq;
        s->rq = NULL;
    }

    while (rq) {
        VirtIOBlockReq *next = rq->next;
        uint16_t idx = virtio_get_queue_index(rq->vq);

        /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
        assert(idx < num_queues);
        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    /* Schedule a BH to submit the requests in each vq's AioContext */
    for (uint16_t i = 0; i < num_queues; i++) {
        if (!vq_rq[i]) {
            continue;
        }

        /* Paired with dec in virtio_blk_dma_restart_bh() */
        blk_inc_in_flight(s->conf.conf.blk);

        aio_bh_schedule_oneshot(s->vq_aio_context[i],
                                virtio_blk_dma_restart_bh,
                                vq_rq[i]);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;

    /* Dataplane has stopped... */
    assert(!s->ioeventfd_started);

    /* ...but requests may still be in flight. */
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself
     * can produce them. */
    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        while (s->rq) {
            req = s->rq;
            s->rq = req->next;

            /* No other threads can access req->vq here */
            virtqueue_detach_element(req->vq, &req->elem, 0);

            g_free(req);
        }
    }

    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* Coalesce internal state, copy to pci i/o region 0. */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* First, sync all possible supported virtio-blk features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (!virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        /* Added for historical reasons, removing it could break migration.  */
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->ioeventfd_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
    }
}

static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        VirtIOBlockReq *req = s->rq;

        while (req) {
            qemu_put_sbyte(f, 1);

            if (s->conf.num_queues > 1) {
                qemu_put_be32(f, virtio_get_queue_index(req->vq));
            }

            qemu_put_virtqueue_element(vdev, f, &req->elem);
            req = req->next;
        }
    }

    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    }

    return 0;
}

static void virtio_resize_cb(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(vdev);
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the BQL, so it can't be
     * called from an iothread. Instead, schedule it to run in a BH in the
     * main context.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}

static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_detach(s);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_attach(s);
    }
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb     = virtio_blk_resize,
    .drained_begin = virtio_blk_drained_begin,
    .drained_end   = virtio_blk_drained_end,
};

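/*
 * Validate the iothread-vq-mapping list: every named IOThread must exist
 * and appear only once, all entries must either supply an explicit "vqs"
 * list or none of them may, every vq index must be below num_queues and
 * assigned at most once, and with explicit lists every vq must be covered.
 *
 * Illustrative command-line usage (shown here as an assumption for
 * orientation, not verbatim documentation; the property syntax may vary by
 * QEMU version):
 *
 *   -object iothread,id=iot0 -object iothread,id=iot1 \
 *   -device '{"driver":"virtio-blk-pci","drive":"drive0",
 *             "iothread-vq-mapping":[{"iothread":"iot0","vqs":[0]},
 *                                    {"iothread":"iot1","vqs":[1]}]}'
 */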
static bool
validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
        uint16_t num_queues, Error **errp)
{
    g_autofree unsigned long *vqs = bitmap_new(num_queues);
    g_autoptr(GHashTable) iothreads =
        g_hash_table_new(g_str_hash, g_str_equal);

    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
        const char *name = node->value->iothread;
        uint16List *vq;

        if (!iothread_by_id(name)) {
            error_setg(errp, "IOThread \"%s\" object does not exist", name);
            return false;
        }

        if (!g_hash_table_add(iothreads, (gpointer)name)) {
            error_setg(errp,
                    "duplicate IOThread name \"%s\" in iothread-vq-mapping",
                    name);
            return false;
        }

        if (node != list) {
            if (!!node->value->vqs != !!list->value->vqs) {
                error_setg(errp, "either all items in iothread-vq-mapping "
                                 "must have vqs or none of them must have it");
                return false;
            }
        }

        for (vq = node->value->vqs; vq; vq = vq->next) {
            if (vq->value >= num_queues) {
                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
                        "less than num_queues %u in iothread-vq-mapping",
                        vq->value, name, num_queues);
                return false;
            }

            if (test_and_set_bit(vq->value, vqs)) {
                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
                        "because it is already assigned", vq->value, name);
                return false;
            }
        }
    }

    if (list->value->vqs) {
        for (uint16_t i = 0; i < num_queues; i++) {
            if (!test_bit(i, vqs)) {
                error_setg(errp,
                        "missing vq %u IOThread assignment in iothread-vq-mapping",
                        i);
                return false;
            }
        }
    }

    return true;
}

/**
 * apply_iothread_vq_mapping:
 * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
 * @vq_aio_context: The array of AioContext pointers to fill in.
 * @num_queues: The length of @vq_aio_context.
 * @errp: If an error occurs, a pointer to the area to store the error.
 *
 * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
 * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
 *
 * Returns: %true on success, %false on failure.
 **/
static bool apply_iothread_vq_mapping(
        IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
        AioContext **vq_aio_context,
        uint16_t num_queues,
        Error **errp)
{
    IOThreadVirtQueueMappingList *node;
    size_t num_iothreads = 0;
    size_t cur_iothread = 0;

    if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
                                           num_queues, errp)) {
        return false;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        num_iothreads++;
    }

    for (node = iothread_vq_mapping_list; node; node = node->next) {
        IOThread *iothread = iothread_by_id(node->value->iothread);
        AioContext *ctx = iothread_get_aio_context(iothread);

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(iothread));

        if (node->value->vqs) {
            uint16List *vq;

            /* Explicit vq:IOThread assignment */
            for (vq = node->value->vqs; vq; vq = vq->next) {
                assert(vq->value < num_queues);
                vq_aio_context[vq->value] = ctx;
            }
        } else {
            /* Round-robin vq:IOThread assignment */
            for (unsigned i = cur_iothread; i < num_queues;
                 i += num_iothreads) {
                vq_aio_context[i] = ctx;
            }
        }

        cur_iothread++;
    }

    return true;
}

1538 /* Context: BQL held */
1539 static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
1540 {
1541     ERRP_GUARD();
1542     VirtIODevice *vdev = VIRTIO_DEVICE(s);
1543     VirtIOBlkConf *conf = &s->conf;
1544     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1545     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1546 
1547     if (conf->iothread && conf->iothread_vq_mapping_list) {
1548         error_setg(errp,
1549                    "iothread and iothread-vq-mapping properties cannot be set "
1550                    "at the same time");
1551         return false;
1552     }
1553 
1554     if (conf->iothread || conf->iothread_vq_mapping_list) {
1555         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
1556             error_setg(errp,
1557                        "device is incompatible with iothread "
1558                        "(transport does not support notifiers)");
1559             return false;
1560         }
1561         if (!virtio_device_ioeventfd_enabled(vdev)) {
1562             error_setg(errp, "ioeventfd is required for iothread");
1563             return false;
1564         }
1565     }
1566 
1567     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
1568 
1569     if (conf->iothread_vq_mapping_list) {
1570         if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
1571                                        s->vq_aio_context,
1572                                        conf->num_queues,
1573                                        errp)) {
1574             g_free(s->vq_aio_context);
1575             s->vq_aio_context = NULL;
1576             return false;
1577         }
1578     } else if (conf->iothread) {
1579         AioContext *ctx = iothread_get_aio_context(conf->iothread);
1580         for (unsigned i = 0; i < conf->num_queues; i++) {
1581             s->vq_aio_context[i] = ctx;
1582         }
1583 
1584         /* Released in virtio_blk_vq_aio_context_cleanup() */
1585         object_ref(OBJECT(conf->iothread));
1586     } else {
1587         AioContext *ctx = qemu_get_aio_context();
1588         for (unsigned i = 0; i < conf->num_queues; i++) {
1589             s->vq_aio_context[i] = ctx;
1590         }
1591     }
1592 
1593     return true;
1594 }
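
/*
 * The three branches above correspond to configurations along these lines
 * (hypothetical IDs, illustration only):
 *
 *   -device '{"driver": "virtio-blk-pci", "drive": "d0",
 *             "iothread-vq-mapping": [{"iothread": "iot0"},
 *                                     {"iothread": "iot1"}]}'
 *                                                     vqs spread across
 *                                                     iot0 and iot1
 *   -device virtio-blk-pci,drive=d0,iothread=iot0     all vqs in iot0
 *   -device virtio-blk-pci,drive=d0                   all vqs in the main
 *                                                     loop context
 */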
1595 
1596 /* Context: BQL held */
1597 static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
1598 {
1599     VirtIOBlkConf *conf = &s->conf;
1600 
1601     assert(!s->ioeventfd_started);
1602 
1603     if (conf->iothread_vq_mapping_list) {
1604         IOThreadVirtQueueMappingList *node;
1605 
1606         for (node = conf->iothread_vq_mapping_list; node; node = node->next) {
1607             IOThread *iothread = iothread_by_id(node->value->iothread);
1608             object_unref(OBJECT(iothread));
1609         }
1610     }
1611 
1612     if (conf->iothread) {
1613         object_unref(OBJECT(conf->iothread));
1614     }
1615 
1616     g_free(s->vq_aio_context);
1617     s->vq_aio_context = NULL;
1618 }
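
/*
 * The object_unref() calls above drop the references taken with object_ref()
 * in apply_iothread_vq_mapping() and virtio_blk_vq_aio_context_init().
 */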
1619 
1620 /* Context: BQL held */
1621 static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
1622 {
1623     VirtIOBlock *s = VIRTIO_BLK(vdev);
1624     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
1625     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1626     unsigned i;
1627     unsigned nvqs = s->conf.num_queues;
1628     Error *local_err = NULL;
1629     int r;
1630 
1631     if (s->ioeventfd_started || s->ioeventfd_starting) {
1632         return 0;
1633     }
1634 
1635     s->ioeventfd_starting = true;
1636 
1637     /* Set up guest notifier (irq) */
1638     r = k->set_guest_notifiers(qbus->parent, nvqs, true);
1639     if (r != 0) {
1640         error_report("virtio-blk failed to set guest notifier (%d), "
1641                      "ensure -accel kvm is set.", r);
1642         goto fail_guest_notifiers;
1643     }
1644 
1645     /*
1646      * Batch all the host notifiers in a single transaction to avoid
1647      * quadratic time complexity in address_space_update_ioeventfds().
1648      */
1649     memory_region_transaction_begin();
1650 
1651     /* Set up virtqueue notify */
1652     for (i = 0; i < nvqs; i++) {
1653         r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
1654         if (r != 0) {
1655             int j = i;
1656 
1657             fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
1658             while (i--) {
1659                 virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
1660             }
1661 
1662             /*
1663              * The transaction expects the ioeventfds to be open when it
1664              * commits. Do it now, before the cleanup loop.
1665              */
1666             memory_region_transaction_commit();
1667 
1668             while (j--) {
1669                 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
1670             }
1671             goto fail_host_notifiers;
1672         }
1673     }
1674 
1675     memory_region_transaction_commit();
1676 
1677     /*
1678      * Try to change the AioContext so that block jobs and other operations can
1679      * co-locate their activity in the same AioContext. If it fails, never mind.
1680      */
1681     assert(nvqs > 0); /* enforced during ->realize() */
1682     r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
1683                             &local_err);
1684     if (r < 0) {
1685         warn_report_err(local_err);
1686     }
1687 
1688     /*
1689      * These fields must be visible to the IOThread when it processes the
1690      * virtqueue, otherwise it will think ioeventfd has not started yet.
1691      *
1692      * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
1693      * called above so that draining does not cause the host notifier to be
1694      * detached/attached prematurely.
1695      */
1696     s->ioeventfd_starting = false;
1697     s->ioeventfd_started = true;
1698     smp_wmb(); /* paired with aio_notify_accept() on the read side */
1699 
1700     /*
1701      * Get this show started by hooking up our callbacks.  If the device is
1702      * drained right now, virtio_blk_drained_end() will do this later instead.
1703      * Attaching the notifier also kicks the virtqueues, processing any requests
1704      * they may already have.
1705      */
1706     if (!blk_in_drain(s->conf.conf.blk)) {
1707         virtio_blk_ioeventfd_attach(s);
1708     }
1709     return 0;
1710 
1711   fail_host_notifiers:
1712     k->set_guest_notifiers(qbus->parent, nvqs, false);
1713   fail_guest_notifiers:
1714     s->ioeventfd_disabled = true;
1715     s->ioeventfd_starting = false;
1716     return -ENOSYS;
1717 }
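
/*
 * To illustrate the host notifier error unwinding above (hypothetical
 * numbers): if nvqs is 4 and assignment fails at i == 2, j saves the value
 * 2, while (i--) unassigns notifiers 1 and 0 inside the still-open
 * transaction, the transaction commits while the ioeventfds are still open,
 * and only then does while (j--) close the notifiers for vqs 1 and 0.
 */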
1718 
1719 /* Stop notifications for new requests from the guest.
1720  *
1721  * Context: BH in IOThread
1722  */
1723 static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
1724 {
1725     VirtQueue *vq = opaque;
1726     EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);
1727 
1728     virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());
1729 
1730     /*
1731      * Test and clear notifier after disabling event, in case poll callback
1732      * didn't have time to run.
1733      */
1734     virtio_queue_host_notifier_read(host_notifier);
1735 }
1736 
1737 /* Context: BQL held */
1738 static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
1739 {
1740     VirtIOBlock *s = VIRTIO_BLK(vdev);
1741     BusState *qbus = qdev_get_parent_bus(DEVICE(s));
1742     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1743     unsigned i;
1744     unsigned nvqs = s->conf.num_queues;
1745 
1746     if (!s->ioeventfd_started || s->ioeventfd_stopping) {
1747         return;
1748     }
1749 
1750     /* ioeventfd was disabled; reset the flags so a later start can retry. */
1751     if (s->ioeventfd_disabled) {
1752         s->ioeventfd_disabled = false;
1753         s->ioeventfd_started = false;
1754         return;
1755     }
1756     s->ioeventfd_stopping = true;
1757 
1758     if (!blk_in_drain(s->conf.conf.blk)) {
1759         for (i = 0; i < nvqs; i++) {
1760             VirtQueue *vq = virtio_get_queue(vdev, i);
1761             AioContext *ctx = s->vq_aio_context[i];
1762 
1763             aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
1764         }
1765     }
1766 
1767     /*
1768      * Batch all the host notifiers in a single transaction to avoid
1769      * quadratic time complexity in address_space_update_ioeventfds().
1770      */
1771     memory_region_transaction_begin();
1772 
1773     for (i = 0; i < nvqs; i++) {
1774         virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
1775     }
1776 
1777     /*
1778      * The transaction expects the ioeventfds to be open when it
1779      * commits. Do it now, before the cleanup loop.
1780      */
1781     memory_region_transaction_commit();
1782 
1783     for (i = 0; i < nvqs; i++) {
1784         virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
1785     }
1786 
1787     /*
1788      * Set ->ioeventfd_started to false before draining so that host notifiers
1789      * are not detached/attached anymore.
1790      */
1791     s->ioeventfd_started = false;
1792 
1793     /* Wait for virtio_blk_dma_restart_bh() and in-flight I/O to complete */
1794     blk_drain(s->conf.conf.blk);
1795 
1796     /*
1797      * Try to switch bs back to the QEMU main loop. If other users keep the
1798      * BlockBackend in the iothread, that's OK.
1799      */
1800     blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);
1801 
1802     /* Clean up guest notifier (irq) */
1803     k->set_guest_notifiers(qbus->parent, nvqs, false);
1804 
1805     s->ioeventfd_stopping = false;
1806 }
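
/*
 * Note that the per-vq stop BHs above are skipped while the BlockBackend is
 * drained: in that case the drained_begin callback has already detached the
 * host notifiers, so only the unassign/cleanup/drain steps run here.
 */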
1807 
1808 static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
1809 {
1810     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1811     VirtIOBlock *s = VIRTIO_BLK(dev);
1812     VirtIOBlkConf *conf = &s->conf;
1813     BlockDriverState *bs;
1814     Error *err = NULL;
1815     unsigned i;
1816 
1817     if (!conf->conf.blk) {
1818         error_setg(errp, "drive property not set");
1819         return;
1820     }
1821     if (!blk_is_inserted(conf->conf.blk)) {
1822         error_setg(errp, "Device needs media, but drive is empty");
1823         return;
1824     }
1825     if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
1826         conf->num_queues = 1;
1827     }
1828     if (!conf->num_queues) {
1829         error_setg(errp, "num-queues property must be larger than 0");
1830         return;
1831     }
1832     if (conf->queue_size <= 2) {
1833         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
1834                    "must be > 2", conf->queue_size);
1835         return;
1836     }
1837     if (!is_power_of_2(conf->queue_size) ||
1838         conf->queue_size > VIRTQUEUE_MAX_SIZE) {
1839         error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
1840                    "must be a power of 2 (max %d)",
1841                    conf->queue_size, VIRTQUEUE_MAX_SIZE);
1842         return;
1843     }
1844 
1845     if (!blkconf_apply_backend_options(&conf->conf,
1846                                        !blk_supports_write_perm(conf->conf.blk),
1847                                        true, errp)) {
1848         return;
1849     }
1850     s->original_wce = blk_enable_write_cache(conf->conf.blk);
1851     if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
1852         return;
1853     }
1854 
1855     if (!blkconf_blocksizes(&conf->conf, errp)) {
1856         return;
1857     }
1858 
1859     bs = blk_bs(conf->conf.blk);
1860     if (bs->bl.zoned != BLK_Z_NONE) {
1861         virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
1862         if (bs->bl.zoned == BLK_Z_HM) {
1863             virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
1864         }
1865     }
1866 
1867     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
1868         (!conf->max_discard_sectors ||
1869          conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
1870         error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
1871                    ", must be between 1 and %d",
1872                    conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
1873         return;
1874     }
1875 
1876     if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
1877         (!conf->max_write_zeroes_sectors ||
1878          conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
1879         error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
1880                    "), must be between 1 and %d",
1881                    conf->max_write_zeroes_sectors,
1882                    (int)BDRV_REQUEST_MAX_SECTORS);
1883         return;
1884     }
1885 
1886     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
1887                                             s->host_features);
1888     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
1889 
1890     qemu_mutex_init(&s->rq_lock);
1891 
1892     s->blk = conf->conf.blk;
1893     s->rq = NULL;
1894     s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
1895 
1896     for (i = 0; i < conf->num_queues; i++) {
1897         virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
1898     }
1899     qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);
1900 
1901     /* Don't start ioeventfd if the transport does not support notifiers. */
1902     if (!virtio_device_ioeventfd_enabled(vdev)) {
1903         s->ioeventfd_disabled = true;
1904     }
1905 
1906     virtio_blk_vq_aio_context_init(s, &err);
1907     if (err != NULL) {
1908         error_propagate(errp, err);
1909         for (i = 0; i < conf->num_queues; i++) {
1910             virtio_del_queue(vdev, i);
1911         }
1912         virtio_cleanup(vdev);
1913         return;
1914     }
1915 
1916     /*
1917      * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
1918      * called after ->start_ioeventfd() has already set blk's AioContext.
1919      */
1920     s->change =
1921         qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, s);
1922 
1923     blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
1924     blk_set_dev_ops(s->blk, &virtio_block_ops, s);
1925 
1926     blk_iostatus_enable(s->blk);
1927 
1928     add_boot_device_lchs(dev, "/disk@0,0",
1929                          conf->conf.lcyls,
1930                          conf->conf.lheads,
1931                          conf->conf.lsecs);
1932 }
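
/*
 * A concrete configuration exercising the checks above (hypothetical IDs and
 * file name, illustration only):
 *
 *   -object iothread,id=iot0
 *   -drive if=none,id=d0,file=disk.img,format=raw
 *   -device virtio-blk-pci,drive=d0,iothread=iot0,num-queues=4,queue-size=128
 *
 * num-queues is non-zero and queue-size is a power of 2 that is greater than
 * 2 and no larger than VIRTQUEUE_MAX_SIZE.
 */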
1933 
1934 static void virtio_blk_device_unrealize(DeviceState *dev)
1935 {
1936     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1937     VirtIOBlock *s = VIRTIO_BLK(dev);
1938     VirtIOBlkConf *conf = &s->conf;
1939     unsigned i;
1940 
1941     blk_drain(s->blk);
1942     del_boot_device_lchs(dev, "/disk@0,0");
1943     virtio_blk_vq_aio_context_cleanup(s);
1944     for (i = 0; i < conf->num_queues; i++) {
1945         virtio_del_queue(vdev, i);
1946     }
1947     qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
1948     qemu_mutex_destroy(&s->rq_lock);
1949     blk_ram_registrar_destroy(&s->blk_ram_registrar);
1950     qemu_del_vm_change_state_handler(s->change);
1951     blockdev_mark_auto_del(s->blk);
1952     virtio_cleanup(vdev);
1953 }
1954 
1955 static void virtio_blk_instance_init(Object *obj)
1956 {
1957     VirtIOBlock *s = VIRTIO_BLK(obj);
1958 
1959     device_add_bootindex_property(obj, &s->conf.conf.bootindex,
1960                                   "bootindex", "/disk@0,0",
1961                                   DEVICE(obj));
1962 }
1963 
1964 static const VMStateDescription vmstate_virtio_blk = {
1965     .name = "virtio-blk",
1966     .minimum_version_id = 2,
1967     .version_id = 2,
1968     .fields = (const VMStateField[]) {
1969         VMSTATE_VIRTIO_DEVICE,
1970         VMSTATE_END_OF_LIST()
1971     },
1972 };
1973 
1974 static const Property virtio_blk_properties[] = {
1975     DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
1976     DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
1977     DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
1978     DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
1979     DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
1980                       VIRTIO_BLK_F_CONFIG_WCE, true),
1981     DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
1982                     true),
1983     DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
1984                        VIRTIO_BLK_AUTO_NUM_QUEUES),
1985     DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
1986     DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
1987     DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
1988                      IOThread *),
1989     DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
1990                                          conf.iothread_vq_mapping_list),
1991     DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
1992                       VIRTIO_BLK_F_DISCARD, true),
1993     DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
1994                      conf.report_discard_granularity, true),
1995     DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
1996                       VIRTIO_BLK_F_WRITE_ZEROES, true),
1997     DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
1998                        conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
1999     DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
2000                        conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
2001     DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
2002                      conf.x_enable_wce_if_config_wce, true),
2003 };
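
/*
 * num-queues defaults to VIRTIO_BLK_AUTO_NUM_QUEUES; a transport may replace
 * that sentinel with a tuned value (virtio-blk-pci, for example, sizes it to
 * the number of vCPUs), and virtio_blk_device_realize() falls back to 1
 * otherwise.
 */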
2004 
2005 static void virtio_blk_class_init(ObjectClass *klass, void *data)
2006 {
2007     DeviceClass *dc = DEVICE_CLASS(klass);
2008     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2009 
2010     device_class_set_props(dc, virtio_blk_properties);
2011     dc->vmsd = &vmstate_virtio_blk;
2012     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2013     vdc->realize = virtio_blk_device_realize;
2014     vdc->unrealize = virtio_blk_device_unrealize;
2015     vdc->get_config = virtio_blk_update_config;
2016     vdc->set_config = virtio_blk_set_config;
2017     vdc->get_features = virtio_blk_get_features;
2018     vdc->set_status = virtio_blk_set_status;
2019     vdc->reset = virtio_blk_reset;
2020     vdc->save = virtio_blk_save_device;
2021     vdc->load = virtio_blk_load_device;
2022     vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
2023     vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;
2024 }
2025 
2026 static const TypeInfo virtio_blk_info = {
2027     .name = TYPE_VIRTIO_BLK,
2028     .parent = TYPE_VIRTIO_DEVICE,
2029     .instance_size = sizeof(VirtIOBlock),
2030     .instance_init = virtio_blk_instance_init,
2031     .class_init = virtio_blk_class_init,
2032 };
2033 
2034 static void virtio_register_types(void)
2035 {
2036     type_register_static(&virtio_blk_info);
2037 }
2038 
2039 type_init(virtio_register_types)
2040