/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "block/block_int.h"
#include "trace.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "system/blockdev.h"
#include "system/block-ram-registrar.h"
#include "system/system.h"
#include "system/runstate.h"
#include "hw/virtio/virtio-blk.h"
#include "scsi/constants.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/iothread-vq-mapping.h"
#include "hw/virtio/virtio-access.h"
#include "hw/virtio/virtio-blk-common.h"
#include "qemu/coroutine.h"

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);

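/* Initialize a request freshly popped from the virtqueue or loaded from the
 * migration stream. */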
static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                    VirtIOBlockReq *req)
{
    req->dev = s;
    req->vq = vq;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
}

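/*
 * Complete a request: fill in the status byte, push the element onto the
 * used ring and notify the guest, taking the irqfd path when running in an
 * IOThread and the regular path otherwise.
 */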
void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(vdev, req, status);

    stb_p(&req->in->status, status);
    iov_discard_undo(&req->inhdr_undo);
    iov_discard_undo(&req->outhdr_undo);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    if (qemu_in_iothread()) {
        virtio_notify_irqfd(vdev, req->vq);
    } else {
        virtio_notify(vdev, req->vq);
    }
}

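/*
 * Apply the configured rerror/werror policy to a failed request. Returns
 * nonzero if the request was consumed here, either parked on s->rq for
 * later restart (BLOCK_ERROR_ACTION_STOP) or completed with
 * VIRTIO_BLK_S_IOERR (BLOCK_ERROR_ACTION_REPORT); returns zero for
 * BLOCK_ERROR_ACTION_IGNORE, in which case the caller completes the
 * request normally.
 */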
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read, bool acct_failed)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->blk), &req->acct);
        }
        g_free(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

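/*
 * Completion callback for reads and writes. Walks the mr_next chain so
 * that a single callback completes every request that was merged into one
 * multireq submission.
 */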
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc != -1, req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests() to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        g_free(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, true)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(s->blk), &req->acct);
    g_free(req);
}

static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
        return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    g_free(req);
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));

    if (req) {
        virtio_blk_init_request(s, vq, req);
    }
    return req;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;
    struct virtio_scsi_inhdr *scsi;
    VirtIOBlock *blk = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(blk);
    VirtQueueElement *elem = &req->elem;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also require at least the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     *
     * Just put anything nonzero so that the ioctl fails in the guest.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
    virtio_stl_p(vdev, &scsi->errors, 255);
    status = VIRTIO_BLK_S_UNSUPP;

fail:
    virtio_blk_req_complete(req, status);
    g_free(req);
}

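/*
 * Submit reqs[start .. start + num_reqs - 1] as one I/O request. When
 * several requests are merged, their iovecs are concatenated into a newly
 * allocated QEMUIOVector and the requests are linked through mr_next so
 * that virtio_blk_rw_complete() can complete them all.
 */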
static inline void submit_requests(VirtIOBlock *s, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    BlockBackend *blk = s->blk;
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    bool is_write = mrb->is_write;
    BdrvRequestFlags flags = 0;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here. We need to initialize it locally and
         * then add the external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
        }

        trace_virtio_blk_submit_multireq(VIRTIO_DEVICE(mrb->reqs[start]->dev),
                                         mrb, start, num_reqs,
                                         sector_num << BDRV_SECTOR_BITS,
                                         qiov->size, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (blk_ram_registrar_ok(&s->blk_ram_registrar)) {
        flags |= BDRV_REQ_REGISTERED_BUF;
    }

    if (is_write) {
        blk_aio_pwritev(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                        flags, virtio_blk_rw_complete,
                        mrb->reqs[start]);
    } else {
        blk_aio_preadv(blk, sector_num << BDRV_SECTOR_BITS, qiov,
                       flags, virtio_blk_rw_complete,
                       mrb->reqs[start]);
    }
}

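/* qsort() comparator that orders requests by start sector */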
static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

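/*
 * Sort the pending requests by sector and submit them in runs of adjacent
 * requests, starting a new run whenever the requests are not sequential or
 * a merge would exceed the backend's iovec or transfer limits.
 */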
static void virtio_blk_submit_multireq(VirtIOBlock *s, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(s, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. requests are not sequential
             * 2. merge would exceed maximum number of IOVs
             * 3. merge would exceed maximum transfer length of backend device
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(s->blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                submit_requests(s, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(s, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    VirtIOBlock *s = req->dev;

    block_acct_start(blk_get_stats(s->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(s, mrb);
    }
    blk_aio_flush(s->blk, virtio_blk_flush_complete, req);
}

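/*
 * Validate that the guest-supplied [sector, sector + size) range is
 * properly aligned and lies entirely within the device.
 */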
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

static uint8_t virtio_blk_handle_discard_write_zeroes(VirtIOBlockReq *req,
    struct virtio_blk_discard_write_zeroes *dwz_hdr, bool is_write_zeroes)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint64_t sector;
    uint32_t num_sectors, flags, max_sectors;
    uint8_t err_status;
    int bytes;

    sector = virtio_ldq_p(vdev, &dwz_hdr->sector);
    num_sectors = virtio_ldl_p(vdev, &dwz_hdr->num_sectors);
    flags = virtio_ldl_p(vdev, &dwz_hdr->flags);
    max_sectors = is_write_zeroes ? s->conf.max_write_zeroes_sectors :
                  s->conf.max_discard_sectors;

    /*
     * max_sectors is at most BDRV_REQUEST_MAX_SECTORS, so this check
     * ensures that "num_sectors << BDRV_SECTOR_BITS" fits in the
     * integer variable.
     */
    if (unlikely(num_sectors > max_sectors)) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    bytes = num_sectors << BDRV_SECTOR_BITS;

    if (unlikely(!virtio_blk_sect_range_ok(s, sector, bytes))) {
        err_status = VIRTIO_BLK_S_IOERR;
        goto err;
    }

    /*
     * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
     * and write zeroes commands if any unknown flag is set.
     */
    if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
        err_status = VIRTIO_BLK_S_UNSUPP;
        goto err;
    }

    if (is_write_zeroes) { /* VIRTIO_BLK_T_WRITE_ZEROES */
        int blk_aio_flags = 0;

        if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
            blk_aio_flags |= BDRV_REQ_MAY_UNMAP;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, bytes,
                         BLOCK_ACCT_WRITE);

        blk_aio_pwrite_zeroes(s->blk, sector << BDRV_SECTOR_BITS,
                              bytes, blk_aio_flags,
                              virtio_blk_discard_write_zeroes_complete, req);
    } else { /* VIRTIO_BLK_T_DISCARD */
        /*
         * The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
         * discard commands if the unmap flag is set.
         */
        if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
            err_status = VIRTIO_BLK_S_UNSUPP;
            goto err;
        }

        blk_aio_pdiscard(s->blk, sector << BDRV_SECTOR_BITS, bytes,
                         virtio_blk_discard_write_zeroes_complete, req);
    }

    return VIRTIO_BLK_S_OK;

err:
    if (is_write_zeroes) {
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
    }
    return err_status;
}

typedef struct ZoneCmdData {
    VirtIOBlockReq *req;
    struct iovec *in_iov;
    unsigned in_num;
    union {
        struct {
            unsigned int nr_zones;
            BlockZoneDescriptor *zones;
        } zone_report_data;
        struct {
            int64_t offset;
        } zone_append_data;
    };
} ZoneCmdData;

/*
 * check_zoned_request: error checking before issuing zoned requests.
 * Returns true if all checks pass.
 * append: true if called for a zone append request.
 */
static bool check_zoned_request(VirtIOBlock *s, int64_t offset, int64_t len,
                                bool append, uint8_t *status)
{
    BlockDriverState *bs = blk_bs(s->blk);
    int index;

    if (!virtio_has_feature(s->host_features, VIRTIO_BLK_F_ZONED)) {
        *status = VIRTIO_BLK_S_UNSUPP;
        return false;
    }

    if (offset < 0 || len < 0 || len > (bs->total_sectors << BDRV_SECTOR_BITS)
        || offset > (bs->total_sectors << BDRV_SECTOR_BITS) - len) {
        *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        return false;
    }

    if (append) {
        if (bs->bl.write_granularity) {
            if ((offset % bs->bl.write_granularity) != 0) {
                *status = VIRTIO_BLK_S_ZONE_UNALIGNED_WP;
                return false;
            }
        }

        index = offset / bs->bl.zone_size;
        if (BDRV_ZT_IS_CONV(bs->wps->wp[index])) {
            *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            return false;
        }

        if (len / 512 > bs->bl.max_append_sectors) {
            if (bs->bl.max_append_sectors == 0) {
                *status = VIRTIO_BLK_S_UNSUPP;
            } else {
                *status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
            }
            return false;
        }
    }
    return true;
}

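/*
 * Completion callback for blk_aio_zone_report(): convert the returned zone
 * descriptors to the little-endian virtio wire format and copy them into
 * the driver-provided input buffer.
 */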
static void virtio_blk_zone_report_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct iovec *in_iov = data->in_iov;
    unsigned in_num = data->in_num;
    int64_t zrp_size, n, j = 0;
    int64_t nz = data->zone_report_data.nr_zones;
    int8_t err_status = VIRTIO_BLK_S_OK;
    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
        .nr_zones = cpu_to_le64(nz),
    };

    trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    zrp_size = sizeof(struct virtio_blk_zone_report)
               + sizeof(struct virtio_blk_zone_descriptor) * nz;
    n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
    if (n != sizeof(zrp_hdr)) {
        virtio_error(vdev, "Driver provided input buffer that is too small!");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    for (size_t i = sizeof(zrp_hdr); i < zrp_size;
        i += sizeof(struct virtio_blk_zone_descriptor), ++j) {
        struct virtio_blk_zone_descriptor desc =
            (struct virtio_blk_zone_descriptor) {
                .z_start = cpu_to_le64(data->zone_report_data.zones[j].start
                    >> BDRV_SECTOR_BITS),
                .z_cap = cpu_to_le64(data->zone_report_data.zones[j].cap
                    >> BDRV_SECTOR_BITS),
                .z_wp = cpu_to_le64(data->zone_report_data.zones[j].wp
                    >> BDRV_SECTOR_BITS),
        };

        switch (data->zone_report_data.zones[j].type) {
        case BLK_ZT_CONV:
            desc.z_type = VIRTIO_BLK_ZT_CONV;
            break;
        case BLK_ZT_SWR:
            desc.z_type = VIRTIO_BLK_ZT_SWR;
            break;
        case BLK_ZT_SWP:
            desc.z_type = VIRTIO_BLK_ZT_SWP;
            break;
        default:
            g_assert_not_reached();
        }

        switch (data->zone_report_data.zones[j].state) {
        case BLK_ZS_RDONLY:
            desc.z_state = VIRTIO_BLK_ZS_RDONLY;
            break;
        case BLK_ZS_OFFLINE:
            desc.z_state = VIRTIO_BLK_ZS_OFFLINE;
            break;
        case BLK_ZS_EMPTY:
            desc.z_state = VIRTIO_BLK_ZS_EMPTY;
            break;
        case BLK_ZS_CLOSED:
            desc.z_state = VIRTIO_BLK_ZS_CLOSED;
            break;
        case BLK_ZS_FULL:
            desc.z_state = VIRTIO_BLK_ZS_FULL;
            break;
        case BLK_ZS_EOPEN:
            desc.z_state = VIRTIO_BLK_ZS_EOPEN;
            break;
        case BLK_ZS_IOPEN:
            desc.z_state = VIRTIO_BLK_ZS_IOPEN;
            break;
        case BLK_ZS_NOT_WP:
            desc.z_state = VIRTIO_BLK_ZS_NOT_WP;
            break;
        default:
            g_assert_not_reached();
        }

        /* TODO: this loop is O(n^2); it needs optimization. */
        n = iov_from_buf(in_iov, in_num, i, &desc, sizeof(desc));
        if (n != sizeof(desc)) {
            virtio_error(vdev, "Driver provided input buffer "
                               "for descriptors that is too small!");
            err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        }
    }

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    g_free(data->zone_report_data.zones);
    g_free(data);
}

static void virtio_blk_handle_zone_report(VirtIOBlockReq *req,
                                          struct iovec *in_iov,
                                          unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    unsigned int nr_zones;
    ZoneCmdData *data;
    int64_t zone_size, offset;
    uint8_t err_status;

    if (req->in_len < sizeof(struct virtio_blk_inhdr) +
            sizeof(struct virtio_blk_zone_report) +
            sizeof(struct virtio_blk_zone_descriptor)) {
        virtio_error(vdev, "in buffer too small for zone report");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    /* start byte offset of the zone report */
    offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    if (!check_zoned_request(s, offset, 0, false, &err_status)) {
        goto out;
    }
    nr_zones = (req->in_len - sizeof(struct virtio_blk_inhdr) -
                sizeof(struct virtio_blk_zone_report)) /
               sizeof(struct virtio_blk_zone_descriptor);
    trace_virtio_blk_handle_zone_report(vdev, req,
                                        offset >> BDRV_SECTOR_BITS, nr_zones);

    zone_size = sizeof(BlockZoneDescriptor) * nr_zones;
    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_report_data.nr_zones = nr_zones;
    data->zone_report_data.zones = g_malloc(zone_size);

    blk_aio_zone_report(s->blk, offset, &data->zone_report_data.nr_zones,
                        data->zone_report_data.zones,
                        virtio_blk_zone_report_complete, data);
    return;
out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
}

static void virtio_blk_zone_mgmt_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int8_t err_status = VIRTIO_BLK_S_OK;

    trace_virtio_blk_zone_mgmt_complete(vdev, req, ret);

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
    }

    virtio_blk_req_complete(req, err_status);
    g_free(req);
}

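/*
 * Handle an open/close/finish/reset zone management command. For
 * VIRTIO_BLK_T_ZONE_RESET_ALL the whole device is covered; otherwise the
 * length is one zone, allowing for a smaller final zone.
 */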
static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    BlockDriverState *bs = blk_bs(s->blk);
    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    uint64_t len;
    uint64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    uint32_t type = virtio_ldl_p(vdev, &req->out.type);
    if (type == VIRTIO_BLK_T_ZONE_RESET_ALL) {
        /* Entire drive capacity */
        offset = 0;
        len = capacity;
        trace_virtio_blk_handle_zone_reset_all(vdev, req, 0,
                                               bs->total_sectors);
    } else {
        if (bs->bl.zone_size > capacity - offset) {
            /* Zoned devices allow the last zone to be smaller. */
            len = capacity - bs->bl.zone_size * (bs->bl.nr_zones - 1ull);
        } else {
            len = bs->bl.zone_size;
        }
        trace_virtio_blk_handle_zone_mgmt(vdev, req, op,
                                          offset >> BDRV_SECTOR_BITS,
                                          len >> BDRV_SECTOR_BITS);
    }

    if (!check_zoned_request(s, offset, len, false, &err_status)) {
        goto out;
    }

    blk_aio_zone_mgmt(s->blk, op, offset, len,
                      virtio_blk_zone_mgmt_complete, req);

    return 0;
out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    return err_status;
}

static void virtio_blk_zone_append_complete(void *opaque, int ret)
{
    ZoneCmdData *data = opaque;
    VirtIOBlockReq *req = data->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    int64_t append_sector, n;
    uint8_t err_status = VIRTIO_BLK_S_OK;

    if (ret) {
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }

    virtio_stq_p(vdev, &append_sector,
                 data->zone_append_data.offset >> BDRV_SECTOR_BITS);
    n = iov_from_buf(data->in_iov, data->in_num, 0, &append_sector,
                     sizeof(append_sector));
    if (n != sizeof(append_sector)) {
        virtio_error(vdev, "Driver provided input buffer smaller than "
                           "append_sector");
        err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD;
        goto out;
    }
    trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret);

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    g_free(data);
}

static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
                                         struct iovec *out_iov,
                                         struct iovec *in_iov,
                                         uint64_t out_num,
                                         unsigned in_num)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint8_t err_status = VIRTIO_BLK_S_OK;

    int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
    int64_t len = iov_size(out_iov, out_num);
    ZoneCmdData *data;

    trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
    if (!check_zoned_request(s, offset, len, true, &err_status)) {
        goto out;
    }

    data = g_malloc(sizeof(ZoneCmdData));
    data->req = req;
    data->in_iov = in_iov;
    data->in_num = in_num;
    data->zone_append_data.offset = offset;
    qemu_iovec_init_external(&req->qiov, out_iov, out_num);

    block_acct_start(blk_get_stats(s->blk), &req->acct, len,
                     BLOCK_ACCT_ZONE_APPEND);

    blk_aio_zone_append(s->blk, &data->zone_append_data.offset, &req->qiov, 0,
                        virtio_blk_zone_append_complete, data);
    return 0;

out:
    virtio_blk_req_complete(req, err_status);
    g_free(req);
    return err_status;
}

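/*
 * Parse one request from the virtqueue and dispatch it. Returns 0 on
 * success, including guest-visible failures reported through the request's
 * status byte, and -1 on fatal errors that mark the device broken.
 */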
static int virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *out_iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        virtio_error(vdev, "virtio-blk missing headers");
        return -1;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        virtio_error(vdev, "virtio-blk request outhdr too short");
        return -1;
    }

    iov_discard_front_undoable(&out_iov, &out_num, sizeof(req->out),
                               &req->outhdr_undo);

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        virtio_error(vdev, "virtio-blk request inhdr too short");
        iov_discard_undo(&req->outhdr_undo);
        return -1;
    }

    /* We always touch the last byte, so just see how big in_iov is.  */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back_undoable(in_iov, &in_num, sizeof(struct virtio_blk_inhdr),
                              &req->inhdr_undo);

    type = virtio_ldl_p(vdev, &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if
     * it was not negotiated, we ignored it in the past, so keep ignoring
     * it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(vdev, &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, out_iov, out_num);
            trace_virtio_blk_handle_write(vdev, req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(vdev, req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(s, req->sector_num, req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(s->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            g_free(req);
            return 0;
        }

        block_acct_start(blk_get_stats(s->blk), &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* Submit pending requests if a merge would exceed the maximum
         * number of requests or the I/O direction changes. */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !s->conf.request_merging)) {
            virtio_blk_submit_multireq(s, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_ZONE_REPORT:
        virtio_blk_handle_zone_report(req, in_iov, in_num);
        break;
    case VIRTIO_BLK_T_ZONE_OPEN:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_OPEN);
        break;
    case VIRTIO_BLK_T_ZONE_CLOSE:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_CLOSE);
        break;
    case VIRTIO_BLK_T_ZONE_FINISH:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_FINISH);
        break;
    case VIRTIO_BLK_T_ZONE_RESET:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_ZONE_RESET_ALL:
        virtio_blk_handle_zone_mgmt(req, BLK_ZO_RESET);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        g_free(req);
        break;
    }
    case VIRTIO_BLK_T_ZONE_APPEND & ~VIRTIO_BLK_T_OUT:
        /*
         * Pass out_iov/out_num and in_iov/in_num because it is not safe
         * to access req->elem.out_sg directly: it may be modified by
         * virtio_blk_handle_request().
         */
        virtio_blk_handle_zone_append(req, out_iov, in_iov, out_num, in_num);
        break;
    /*
     * VIRTIO_BLK_T_DISCARD and VIRTIO_BLK_T_WRITE_ZEROES are defined with
     * the VIRTIO_BLK_T_OUT flag set, and that flag was masked off in the
     * switch expression, so mask it in the case labels too; whether it was
     * actually set is checked below.
     */
    case VIRTIO_BLK_T_DISCARD & ~VIRTIO_BLK_T_OUT:
    case VIRTIO_BLK_T_WRITE_ZEROES & ~VIRTIO_BLK_T_OUT:
    {
        struct virtio_blk_discard_write_zeroes dwz_hdr;
        size_t out_len = iov_size(out_iov, out_num);
        bool is_write_zeroes = (type & ~VIRTIO_BLK_T_BARRIER) ==
                               VIRTIO_BLK_T_WRITE_ZEROES;
        uint8_t err_status;

        /*
         * Unsupported if VIRTIO_BLK_T_OUT is not set or the request contains
         * more than one segment.
         */
        if (unlikely(!(type & VIRTIO_BLK_T_OUT) ||
                     out_len > sizeof(dwz_hdr))) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            g_free(req);
            return 0;
        }

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &dwz_hdr,
                                sizeof(dwz_hdr)) != sizeof(dwz_hdr))) {
            iov_discard_undo(&req->inhdr_undo);
            iov_discard_undo(&req->outhdr_undo);
            virtio_error(vdev, "virtio-blk discard/write_zeroes header"
                         " too short");
            return -1;
        }

        err_status = virtio_blk_handle_discard_write_zeroes(req, &dwz_hdr,
                                                            is_write_zeroes);
        if (err_status != VIRTIO_BLK_S_OK) {
            virtio_blk_req_complete(req, err_status);
            g_free(req);
        }

        break;
    }
    default:
    {
        /*
         * Give subclasses a chance to handle unknown requests. This way the
         * class lookup is not in the hot path.
         */
        VirtIOBlkClass *vbk = VIRTIO_BLK_GET_CLASS(s);
        if (!vbk->handle_unknown_request ||
            !vbk->handle_unknown_request(req, mrb, type)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
            g_free(req);
        }
    }
    }
    return 0;
}

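/*
 * Process all pending requests on a virtqueue. Guest notifications are
 * suppressed while the loop runs and I/O submission is batched between
 * defer_call_begin() and defer_call_end().
 */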
void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
{
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};
    bool suppress_notifications = virtio_queue_get_notification(vq);

    defer_call_begin();

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_blk_get_request(s, vq))) {
            if (virtio_blk_handle_request(req, &mrb)) {
                virtqueue_detach_element(req->vq, &req->elem, 0);
                g_free(req);
                break;
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (!virtio_queue_empty(vq));

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    defer_call_end();
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = (VirtIOBlock *)vdev;

    if (!s->ioeventfd_disabled && !s->ioeventfd_started) {
        /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
         * ioeventfd here instead of waiting for .set_status().
         */
        virtio_device_start_ioeventfd(vdev);
        if (!s->ioeventfd_disabled) {
            return;
        }
    }

    virtio_blk_handle_vq(s, vq);
}

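/*
 * Bottom half that resubmits the requests parked on s->rq for a single
 * virtqueue once the VM resumes; scheduled in that virtqueue's AioContext
 * by virtio_blk_dma_restart_cb().
 */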
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev; /* we're called with at least one request */

    MultiReqBuffer mrb = {};

    while (req) {
        VirtIOBlockReq *next = req->next;
        if (virtio_blk_handle_request(req, &mrb)) {
            /* Device is now broken and won't do any processing until it gets
             * reset. Already queued requests will be lost: let's purge them.
             */
            while (req) {
                next = req->next;
                virtqueue_detach_element(req->vq, &req->elem, 0);
                g_free(req);
                req = next;
            }
            break;
        }
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s, &mrb);
    }

    /* Paired with inc in virtio_blk_dma_restart_cb() */
    blk_dec_in_flight(s->conf.conf.blk);
}

static void virtio_blk_dma_restart_cb(void *opaque, bool running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;
    uint16_t num_queues = s->conf.num_queues;
    g_autofree VirtIOBlockReq **vq_rq = NULL;
    VirtIOBlockReq *rq = NULL;

    if (!running) {
        return;
    }

    /* Split the device-wide s->rq request list into per-vq request lists */
    vq_rq = g_new0(VirtIOBlockReq *, num_queues);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        rq = s->rq;
        s->rq = NULL;
    }

    while (rq) {
        VirtIOBlockReq *next = rq->next;
        uint16_t idx = virtio_get_queue_index(rq->vq);

        /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
        assert(idx < num_queues);
        rq->next = vq_rq[idx];
        vq_rq[idx] = rq;
        rq = next;
    }

    /* Schedule a BH to submit the requests in each vq's AioContext */
    for (uint16_t i = 0; i < num_queues; i++) {
        if (!vq_rq[i]) {
            continue;
        }

        /* Paired with dec in virtio_blk_dma_restart_bh() */
        blk_inc_in_flight(s->conf.conf.blk);

        aio_bh_schedule_oneshot(s->vq_aio_context[i],
                                virtio_blk_dma_restart_bh,
                                vq_rq[i]);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;

    /* Dataplane has stopped... */
    assert(!s->ioeventfd_started);

    /* ...but requests may still be in flight. */
    blk_drain(s->blk);

    /* We drop queued requests after blk_drain() because blk_drain() itself can
     * produce them. */
    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        while (s->rq) {
            req = s->rq;
            s->rq = req->next;

            /* No other threads can access req->vq here */
            virtqueue_detach_element(req->vq, &req->elem, 0);

            g_free(req);
        }
    }

    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    BlockDriverState *bs = blk_bs(s->blk);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int64_t length;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
    virtio_stl_p(vdev, &blkcfg.seg_max,
                 s->conf.seg_max_adjust ? s->conf.queue_size - 2 : 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stl_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    length = blk_getlength(s->blk);
    if (length > 0 && length / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD)) {
        uint32_t discard_granularity = conf->discard_granularity;
        if (discard_granularity == -1 || !s->conf.report_discard_granularity) {
            discard_granularity = blk_size;
        }
        virtio_stl_p(vdev, &blkcfg.max_discard_sectors,
                     s->conf.max_discard_sectors);
        virtio_stl_p(vdev, &blkcfg.discard_sector_alignment,
                     discard_granularity >> BDRV_SECTOR_BITS);
        /*
         * We support only one segment per request since multiple segments
         * are not widely used and there are no userspace APIs that allow
         * applications to submit multiple segments in a single call.
         */
        virtio_stl_p(vdev, &blkcfg.max_discard_seg, 1);
    }
    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES)) {
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_sectors,
                     s->conf.max_write_zeroes_sectors);
        blkcfg.write_zeroes_may_unmap = 1;
        virtio_stl_p(vdev, &blkcfg.max_write_zeroes_seg, 1);
    }
    if (bs->bl.zoned != BLK_Z_NONE) {
        switch (bs->bl.zoned) {
        case BLK_Z_HM:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HM;
            break;
        case BLK_Z_HA:
            blkcfg.zoned.model = VIRTIO_BLK_Z_HA;
            break;
        default:
            g_assert_not_reached();
        }

        virtio_stl_p(vdev, &blkcfg.zoned.zone_sectors,
                     bs->bl.zone_size / 512);
        virtio_stl_p(vdev, &blkcfg.zoned.max_active_zones,
                     bs->bl.max_active_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.max_open_zones,
                     bs->bl.max_open_zones);
        virtio_stl_p(vdev, &blkcfg.zoned.write_granularity, blk_size);
        virtio_stl_p(vdev, &blkcfg.zoned.max_append_sectors,
                     bs->bl.max_append_sectors);
    } else {
        blkcfg.zoned.model = VIRTIO_BLK_Z_NONE;
    }
    memcpy(config, &blkcfg, s->config_size);
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, s->config_size);

    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    /* First, sync all possible supported virtio-blk features */
    features |= s->host_features;

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (!virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        /* Added for historical reasons, removing it could break migration.  */
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (blk_enable_write_cache(s->blk) ||
        (s->conf.x_enable_wce_if_config_wce &&
         virtio_has_feature(features, VIRTIO_BLK_F_CONFIG_WCE))) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (!blk_is_writable(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }
    if (s->conf.num_queues > 1) {
        virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
    }

    return features;
}

static int virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (!(status & (VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK))) {
        assert(!s->ioeventfd_started);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
    }
    return 0;
}

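/*
 * Save the requests parked on s->rq (e.g. by the "stop" error policy) so
 * that the destination can resubmit them after migration.
 */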
static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
        VirtIOBlockReq *req = s->rq;

        while (req) {
            qemu_put_sbyte(f, 1);

            if (s->conf.num_queues > 1) {
                qemu_put_be32(f, virtio_get_queue_index(req->vq));
            }

            qemu_put_virtqueue_element(vdev, f, &req->elem);
            req = req->next;
        }
    }

    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        unsigned nvqs = s->conf.num_queues;
        unsigned vq_idx = 0;
        VirtIOBlockReq *req;

        if (nvqs > 1) {
            vq_idx = qemu_get_be32(f);

            if (vq_idx >= nvqs) {
                error_report("Invalid virtqueue index in request list: %#x",
                             vq_idx);
                return -EINVAL;
            }
        }

        req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
        virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);

        WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
            req->next = s->rq;
            s->rq = req;
        }
    }

    return 0;
}

static void virtio_resize_cb(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    virtio_notify_config(vdev);
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    /*
     * virtio_notify_config() needs to acquire the BQL,
     * so it can't be called from an iothread. Instead, schedule
     * it to be run in the main context BH.
     */
    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
}

static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_blk_drained_begin(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_detach(s);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_blk_drained_end(void *opaque)
{
    VirtIOBlock *s = opaque;

    if (s->ioeventfd_started) {
        virtio_blk_ioeventfd_attach(s);
    }
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb     = virtio_blk_resize,
    .drained_begin = virtio_blk_drained_begin,
    .drained_end   = virtio_blk_drained_end,
};

/* Context: BQL held */
static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
{
    ERRP_GUARD();
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlkConf *conf = &s->conf;
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (conf->iothread && conf->iothread_vq_mapping_list) {
        error_setg(errp,
                   "iothread and iothread-vq-mapping properties cannot be set "
                   "at the same time");
        return false;
    }

    if (conf->iothread || conf->iothread_vq_mapping_list) {
        if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
            error_setg(errp,
                       "device is incompatible with iothread "
                       "(transport does not support notifiers)");
            return false;
        }
        if (!virtio_device_ioeventfd_enabled(vdev)) {
            error_setg(errp, "ioeventfd is required for iothread");
            return false;
        }
    }

    s->vq_aio_context = g_new(AioContext *, conf->num_queues);

    if (conf->iothread_vq_mapping_list) {
        if (!iothread_vq_mapping_apply(conf->iothread_vq_mapping_list,
                                       s->vq_aio_context,
                                       conf->num_queues,
                                       errp)) {
            g_free(s->vq_aio_context);
            s->vq_aio_context = NULL;
            return false;
        }
    } else if (conf->iothread) {
        AioContext *ctx = iothread_get_aio_context(conf->iothread);
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }

        /* Released in virtio_blk_vq_aio_context_cleanup() */
        object_ref(OBJECT(conf->iothread));
    } else {
        AioContext *ctx = qemu_get_aio_context();
        for (unsigned i = 0; i < conf->num_queues; i++) {
            s->vq_aio_context[i] = ctx;
        }
    }

    return true;
}

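/*
 * Example of the per-virtqueue mapping mode above, using hypothetical ids
 * ("iot0", "iot1", "drive0"); JSON device syntax is used because
 * iothread-vq-mapping takes a list:
 *
 *   -object iothread,id=iot0 -object iothread,id=iot1
 *   -device '{"driver": "virtio-blk-pci", "drive": "drive0",
 *             "num-queues": 2,
 *             "iothread-vq-mapping": [
 *                 {"iothread": "iot0", "vqs": [0]},
 *                 {"iothread": "iot1", "vqs": [1]}
 *             ]}'
 *
 * When neither iothread property is given, every queue falls back to the
 * main loop AioContext, as the last branch above shows.
 */
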
/* Context: BQL held */
static void virtio_blk_vq_aio_context_cleanup(VirtIOBlock *s)
{
    VirtIOBlkConf *conf = &s->conf;

    assert(!s->ioeventfd_started);

    if (conf->iothread_vq_mapping_list) {
        iothread_vq_mapping_cleanup(conf->iothread_vq_mapping_list);
    }

    if (conf->iothread) {
        object_unref(OBJECT(conf->iothread));
    }

    g_free(s->vq_aio_context);
    s->vq_aio_context = NULL;
}

/* Context: BQL held */
static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;
    Error *local_err = NULL;
    int r;

    if (s->ioeventfd_started || s->ioeventfd_starting) {
        return 0;
    }

    s->ioeventfd_starting = true;

    /* Set up guest notifier (irq) */
    r = k->set_guest_notifiers(qbus->parent, nvqs, true);
    if (r != 0) {
        error_report("virtio-blk failed to set guest notifier (%d), "
                     "ensure -accel kvm is set.", r);
        goto fail_guest_notifiers;
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    /* Set up virtqueue notify */
    for (i = 0; i < nvqs; i++) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
        if (r != 0) {
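            /*
             * Save the count of notifiers set so far: i is consumed
             * disabling them before the transaction commits, and j cleans
             * them up afterwards.
             */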
            int j = i;

            fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
            while (i--) {
                virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
            }

            /*
             * The transaction expects the ioeventfds to be open when it
             * commits. Do it now, before the cleanup loop.
             */
            memory_region_transaction_commit();

            while (j--) {
                virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), j);
            }
            goto fail_host_notifiers;
        }
    }

    memory_region_transaction_commit();

    /*
     * Try to change the AioContext so that block jobs and other operations can
     * co-locate their activity in the same AioContext. If it fails, never mind.
     */
    assert(nvqs > 0); /* enforced during ->realize() */
    r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                            &local_err);
    if (r < 0) {
        warn_report_err(local_err);
    }

    /*
     * These fields must be visible to the IOThread when it processes the
     * virtqueue, otherwise it will think ioeventfd has not started yet.
     *
     * Make sure ->ioeventfd_started is false when blk_set_aio_context() is
     * called above so that draining does not cause the host notifier to be
     * detached/attached prematurely.
     */
    s->ioeventfd_starting = false;
    s->ioeventfd_started = true;
    smp_wmb(); /* paired with aio_notify_accept() on the read side */

    /*
     * Get this show started by hooking up our callbacks.  If drained now,
     * virtio_blk_drained_end() will do this later.
     * Attaching the notifier also kicks the virtqueues, processing any requests
     * they may already have.
     */
    if (!blk_in_drain(s->conf.conf.blk)) {
        virtio_blk_ioeventfd_attach(s);
    }
    return 0;

  fail_host_notifiers:
    k->set_guest_notifiers(qbus->parent, nvqs, false);
  fail_guest_notifiers:
    s->ioeventfd_disabled = true;
    s->ioeventfd_starting = false;
    return -ENOSYS;
}

/* Stop notifications for new requests from guest.
 *
 * Context: BH in IOThread
 */
static void virtio_blk_ioeventfd_stop_vq_bh(void *opaque)
{
    VirtQueue *vq = opaque;
    EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq);

    virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context());

    /*
     * Test and clear notifier after disabling event, in case poll callback
     * didn't have time to run.
     */
    virtio_queue_host_notifier_read(host_notifier);
}

/* Context: BQL held */
static void virtio_blk_stop_ioeventfd(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BusState *qbus = qdev_get_parent_bus(DEVICE(s));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    unsigned i;
    unsigned nvqs = s->conf.num_queues;

    if (!s->ioeventfd_started || s->ioeventfd_stopping) {
        return;
    }

    /*
     * ioeventfd was disabled (e.g. an earlier start attempt failed), so
     * there are no notifiers to tear down. Clear the flags so the next
     * start can try again.
     */
    if (s->ioeventfd_disabled) {
        s->ioeventfd_disabled = false;
        s->ioeventfd_started = false;
        return;
    }
    s->ioeventfd_stopping = true;

    if (!blk_in_drain(s->conf.conf.blk)) {
        for (i = 0; i < nvqs; i++) {
            VirtQueue *vq = virtio_get_queue(vdev, i);
            AioContext *ctx = s->vq_aio_context[i];

            aio_wait_bh_oneshot(ctx, virtio_blk_ioeventfd_stop_vq_bh, vq);
        }
    }

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    for (i = 0; i < nvqs; i++) {
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), i);
    }

    /*
     * Set ->ioeventfd_started to false before draining so that host notifiers
     * are not detached/attached anymore.
     */
    s->ioeventfd_started = false;

    /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */
    blk_drain(s->conf.conf.blk);

    /*
     * Try to switch bs back to the QEMU main loop. If other users keep the
     * BlockBackend in the IOThread, that's OK.
     */
    blk_set_aio_context(s->conf.conf.blk, qemu_get_aio_context(), NULL);

    /* Clean up guest notifier (irq) */
    k->set_guest_notifiers(qbus->parent, nvqs, false);

    s->ioeventfd_stopping = false;
}

static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    BlockDriverState *bs;
    Error *err = NULL;
    unsigned i;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }
    if (conf->num_queues == VIRTIO_BLK_AUTO_NUM_QUEUES) {
        conf->num_queues = 1;
    }
    if (!conf->num_queues) {
        error_setg(errp, "num-queues property must be larger than 0");
        return;
    }
    if (conf->queue_size <= 2) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be > 2", conf->queue_size);
        return;
    }
    if (!is_power_of_2(conf->queue_size) ||
        conf->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
                   "must be a power of 2 (max %d)",
                   conf->queue_size, VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!blkconf_apply_backend_options(&conf->conf,
                                       !blk_supports_write_perm(conf->conf.blk),
                                       true, errp)) {
        return;
    }
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
        return;
    }

    if (!blkconf_blocksizes(&conf->conf, errp)) {
        return;
    }

    bs = blk_bs(conf->conf.blk);
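    /*
     * Advertise VIRTIO_BLK_F_ZONED when the backend is zoned. Host-managed
     * zoned devices get discard cleared; presumably zone resets, not
     * discards, are the mechanism for reclaiming sectors there.
     */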
    if (bs->bl.zoned != BLK_Z_NONE) {
        virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
        if (bs->bl.zoned == BLK_Z_HM) {
            virtio_clear_feature(&s->host_features, VIRTIO_BLK_F_DISCARD);
        }
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_DISCARD) &&
        (!conf->max_discard_sectors ||
         conf->max_discard_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-discard-sectors property (%" PRIu32 ")"
                   ", must be between 1 and %d",
                   conf->max_discard_sectors, (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    if (virtio_has_feature(s->host_features, VIRTIO_BLK_F_WRITE_ZEROES) &&
        (!conf->max_write_zeroes_sectors ||
         conf->max_write_zeroes_sectors > BDRV_REQUEST_MAX_SECTORS)) {
        error_setg(errp, "invalid max-write-zeroes-sectors property (%" PRIu32
                   "), must be between 1 and %d",
                   conf->max_write_zeroes_sectors,
                   (int)BDRV_REQUEST_MAX_SECTORS);
        return;
    }

    s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                            s->host_features);
    virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);

    qemu_mutex_init(&s->rq_lock);

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    for (i = 0; i < conf->num_queues; i++) {
        virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
    }
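    /*
     * Grow the coroutine pool so request coroutines are recycled rather
     * than allocated fresh; half of each ring is a heuristic for how many
     * requests may be in flight at once, not a hard limit.
     */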
    qemu_coroutine_inc_pool_size(conf->num_queues * conf->queue_size / 2);

    /* Don't start ioeventfd if transport does not support notifiers. */
    if (!virtio_device_ioeventfd_enabled(vdev)) {
        s->ioeventfd_disabled = true;
    }

    virtio_blk_vq_aio_context_init(s, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        for (i = 0; i < conf->num_queues; i++) {
            virtio_del_queue(vdev, i);
        }
        virtio_cleanup(vdev);
        return;
    }

    /*
     * This must be after virtio_init() so virtio_blk_dma_restart_cb() gets
     * called after ->start_ioeventfd() has already set blk's AioContext.
     */
    s->change =
        qdev_add_vm_change_state_handler(dev, virtio_blk_dma_restart_cb, NULL, s);

    blk_ram_registrar_init(&s->blk_ram_registrar, s->blk);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);

    blk_iostatus_enable(s->blk);

    add_boot_device_lchs(dev, "/disk@0,0",
                         conf->conf.lcyls,
                         conf->conf.lheads,
                         conf->conf.lsecs);
}

static void virtio_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    unsigned i;

    blk_drain(s->blk);
    del_boot_device_lchs(dev, "/disk@0,0");
    virtio_blk_vq_aio_context_cleanup(s);
    for (i = 0; i < conf->num_queues; i++) {
        virtio_del_queue(vdev, i);
    }
    qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2);
    qemu_mutex_destroy(&s->rq_lock);
    blk_ram_registrar_destroy(&s->blk_ram_registrar);
    qemu_del_vm_change_state_handler(s->change);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj));
}

static const VMStateDescription vmstate_virtio_blk = {
    .name = "virtio-blk",
    .minimum_version_id = 2,
    .version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static const Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_ERROR_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT64("config-wce", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_CONFIG_WCE, true),
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
                       VIRTIO_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
    DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
    DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
                     IOThread *),
    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
                                         conf.iothread_vq_mapping_list),
    DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_DISCARD, true),
    DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock,
                     conf.report_discard_granularity, true),
    DEFINE_PROP_BIT64("write-zeroes", VirtIOBlock, host_features,
                      VIRTIO_BLK_F_WRITE_ZEROES, true),
    DEFINE_PROP_UINT32("max-discard-sectors", VirtIOBlock,
                       conf.max_discard_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_UINT32("max-write-zeroes-sectors", VirtIOBlock,
                       conf.max_write_zeroes_sectors, BDRV_REQUEST_MAX_SECTORS),
    DEFINE_PROP_BOOL("x-enable-wce-if-config-wce", VirtIOBlock,
                     conf.x_enable_wce_if_config_wce, true),
};

static void virtio_blk_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_blk_properties);
    dc->vmsd = &vmstate_virtio_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
    vdc->start_ioeventfd = virtio_blk_start_ioeventfd;
    vdc->stop_ioeventfd = virtio_blk_stop_ioeventfd;
}

static const TypeInfo virtio_blk_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
    .class_size = sizeof(VirtIOBlkClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_blk_info);
}

type_init(virtio_register_types)