/* xref: /qemu/block/io.c (revision 73d29ea2417b58ca55fba1aa468ba38e3607b583) */
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "system/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "system/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
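
/*
 * Editor's note, as a worked example: with the usual BDRV_SECTOR_BITS of 9,
 * this evaluates to 32768 * 512 bytes, i.e. a 16 MiB cap on the temporary
 * buffers used for copy-on-read and write-zeroes emulation below.
 */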

static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                         bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        /* called with rdlock taken, but it doesn't really need it. */
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
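
/*
 * Illustrative sketch (not compiled): upper bounds are merged with
 * MIN_NON_ZERO, where 0 means "no limit", while alignments take the stricter
 * MAX.  E.g. merging a child with max_transfer = 0 (unlimited) into a parent
 * with max_transfer = 65536 keeps 65536, and merging opt_mem_alignment 512
 * with 4096 yields 4096.
 */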

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
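
/*
 * Illustrative sketch (not compiled): a caller that wants the old limits
 * restored automatically on failure can wrap the call in a transaction,
 * assuming tran_new()/tran_abort()/tran_commit() from "qemu/transactions.h":
 *
 *     Transaction *tran = tran_new();
 *     bdrv_refresh_limits(bs, tran, &local_err);
 *     if (local_err) {
 *         tran_abort(tran);   // bdrv_refresh_limits_abort() restores bs->bl
 *     } else {
 *         tran_commit(tran);
 *     }
 */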

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
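
/*
 * Illustrative sketch (not compiled): because the flag is a reference count,
 * enable/disable calls must be strictly paired per user:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue reads that should populate the top image ...
 *     bdrv_disable_copy_on_read(bs);
 *
 * A second user doing the same concurrently leaves copy-on-read enabled until
 * both have called the disable function.
 */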

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }

    /* At this point, we should always be running in the main loop. */
    GLOBAL_STATE_CODE();
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
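
/*
 * Illustrative sketch (not compiled): the usual pattern for callers is to
 * bracket an operation that must not race with in-flight I/O on a node:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the node or the graph around it safely ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is simply this pattern with an empty drained section,
 * i.e. it only waits for pending requests to complete.
 */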

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
     * so iterating bdrv_next_all_states() is safe.
     */
    while ((bs = bdrv_next_all_states(bs))) {
        result |= bdrv_drain_poll(bs, NULL, true);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could therefore block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_begin(bs, NULL, false);
    }
}

void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could therefore block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the I/O
     * requests to finish could therefore block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_end(bs, NULL);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
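
/*
 * Illustrative sketch (not compiled): with req->overlap_offset == 4096 and
 * req->overlap_bytes == 4096, the request covers [4096, 8192).  A query for
 * offset = 0, bytes = 4096 covers [0, 4096) and does not overlap, while
 * offset = 8000, bytes = 1000 covers [8000, 9000) and does.
 */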

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
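
/*
 * Illustrative sketch (not compiled): for req->offset = 5000,
 * req->bytes = 2000 and align = 4096, the serialised region is rounded out to
 * overlap_offset = 4096 and overlap_bytes = ROUND_UP(7000, 4096) - 4096 =
 * 4096, i.e. the aligned window [4096, 8192) containing the request.
 */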

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}
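
/*
 * Illustrative sketch (not compiled): with a subcluster size of 65536,
 * offset = 70000 and bytes = 1000 round out to align_offset = 65536 and
 * align_bytes = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536, i.e.
 * exactly the one subcluster [65536, 131072) that the request touches.
 */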

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
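
/*
 * Illustrative sketch (not compiled): given a valid BdrvChild *child, a
 * caller that also wants the zeroed ranges deallocated where possible would
 * pass the unmap hint through:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */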

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
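
/*
 * Illustrative sketch (not compiled): this pair bridges callback-style AIO
 * drivers into coroutine context.  The caller records itself, submits the AIO
 * request with bdrv_co_io_em_complete as the completion callback, yields, and
 * is woken with the result:
 *
 *     CoroutineIOCompletion co = { .coroutine = qemu_coroutine_self() };
 *
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     if (acb) {
 *         qemu_coroutine_yield();
 *         ret = co.ret;
 *     }
 *
 * bdrv_driver_preadv() and bdrv_driver_pwritev() below use exactly this
 * pattern.
 */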

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (bs->open_flags & BDRV_O_NO_FLUSH) {
        flags &= ~BDRV_REQ_FUA;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t align_offset;
    int64_t align_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive.  That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.  Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
    skip_bytes = offset - align_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   align_offset, align_bytes);

    while (align_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(align_bytes, max_transfer);
        } else {
            ret = bdrv_co_is_allocated(bs, align_offset,
                                       MIN(align_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(align_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, align_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests.  If this is a deliberate copy-on-read
                 * then we don't want to ignore the error.  Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        align_offset += pnum;
        align_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_co_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the @end.
 *
 * @tail_buf is a pointer to sub-buffer, corresponding to align-sized chunk
 * around tail, if tail exists.
 *
 * @merge_reads is true for small requests,
 * if @buf_len == @head + bytes + @tail. In this case it is possible that both
 * head and tail exist but @buf_len == align and @tail_buf == @buf.
 *
 * @write is true for write requests, false for read requests.
 *
 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
 * merge existing vector elements into a single one.  @collapse_bounce_buf acts
 * as the bounce buffer in such cases.  @pre_collapse_qiov has the pre-collapse
 * I/O vector elements so for read requests, the data can be copied back after
 * the read is done.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    bool write;
    QEMUIOVector local_qiov;

    uint8_t *collapse_bounce_buf;
    size_t collapse_len;
    QEMUIOVector pre_collapse_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    pad->write = write;

    return true;
}
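
/*
 * Illustrative sketch (not compiled): with align = 4096, offset = 5000 and
 * bytes = 10000, the head padding is 5000 % 4096 = 904 bytes and the tail
 * padding is 4096 - (15000 % 4096) = 1384 bytes.  Since
 * sum = 904 + 10000 + 1384 > align and both paddings exist, buf_len becomes
 * 2 * align = 8192 (head page at the start, tail page at the end) and
 * merge_reads stays false.
 */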
1513 
1514 static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild * child,BdrvTrackedRequest * req,BdrvRequestPadding * pad,bool zero_middle)1515 bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
1516                       BdrvRequestPadding *pad, bool zero_middle)
1517 {
1518     QEMUIOVector local_qiov;
1519     BlockDriverState *bs = child->bs;
1520     uint64_t align = bs->bl.request_alignment;
1521     int ret;
1522 
1523     assert(req->serialising && pad->buf);
1524 
1525     if (pad->head || pad->merge_reads) {
1526         int64_t bytes = pad->merge_reads ? pad->buf_len : align;
1527 
1528         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
1529 
1530         if (pad->head) {
1531             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1532         }
1533         if (pad->merge_reads && pad->tail) {
1534             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1535         }
1536         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
1537                                   align, &local_qiov, 0, 0);
1538         if (ret < 0) {
1539             return ret;
1540         }
1541         if (pad->head) {
1542             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1543         }
1544         if (pad->merge_reads && pad->tail) {
1545             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1546         }
1547 
1548         if (pad->merge_reads) {
1549             goto zero_mem;
1550         }
1551     }
1552 
1553     if (pad->tail) {
1554         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
1555 
1556         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1557         ret = bdrv_aligned_preadv(
1558                 child, req,
1559                 req->overlap_offset + req->overlap_bytes - align,
1560                 align, align, &local_qiov, 0, 0);
1561         if (ret < 0) {
1562             return ret;
1563         }
1564         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1565     }
1566 
1567 zero_mem:
1568     if (zero_middle) {
1569         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
1570     }
1571 
1572     return 0;
1573 }
1574 
1575 /**
1576  * Free *pad's associated buffers, and perform any necessary finalization steps.
1577  */
1578 static void bdrv_padding_finalize(BdrvRequestPadding *pad)
1579 {
1580     if (pad->collapse_bounce_buf) {
1581         if (!pad->write) {
1582             /*
1583              * If padding required elements in the vector to be collapsed into a
1584              * bounce buffer, copy the bounce buffer content back
1585              */
1586             qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
1587                                 pad->collapse_bounce_buf, pad->collapse_len);
1588         }
1589         qemu_vfree(pad->collapse_bounce_buf);
1590         qemu_iovec_destroy(&pad->pre_collapse_qiov);
1591     }
1592     if (pad->buf) {
1593         qemu_vfree(pad->buf);
1594         qemu_iovec_destroy(&pad->local_qiov);
1595     }
1596     memset(pad, 0, sizeof(*pad));
1597 }
1598 
1599 /*
1600  * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
1601  * ensuring that the resulting vector will not exceed IOV_MAX elements.
1602  *
1603  * To ensure this, when necessary, the first two or three elements of @iov are
1604  * merged into pad->collapse_bounce_buf and replaced by a reference to that
1605  * bounce buffer in pad->local_qiov.
1606  *
1607  * After performing a read request, the data from the bounce buffer must be
1608  * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
1609  */
1610 static int bdrv_create_padded_qiov(BlockDriverState *bs,
1611                                    BdrvRequestPadding *pad,
1612                                    struct iovec *iov, int niov,
1613                                    size_t iov_offset, size_t bytes)
1614 {
1615     int padded_niov, surplus_count, collapse_count;
1616 
1617     /* The unpadded vector must already fit within IOV_MAX */
1618     assert(niov <= IOV_MAX);
1619 
1620     /*
1621      * Cannot pad if resulting length would exceed SIZE_MAX.  Returning an error
1622      * to the guest is not ideal, but there is little else we can do.  At least
1623      * this will practically never happen on 64-bit systems.
1624      */
1625     if (SIZE_MAX - pad->head < bytes ||
1626         SIZE_MAX - pad->head - bytes < pad->tail)
1627     {
1628         return -EINVAL;
1629     }
1630 
1631     /* Length of the resulting IOV if we just concatenated everything */
1632     padded_niov = !!pad->head + niov + !!pad->tail;
1633 
1634     qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));
1635 
1636     if (pad->head) {
1637         qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
1638     }
1639 
1640     /*
1641      * If padded_niov > IOV_MAX, we cannot just concatenate everything.
1642      * Instead, merge the first two or three elements of @iov to reduce the
1643      * number of vector elements as necessary.
1644      */
1645     if (padded_niov > IOV_MAX) {
1646         /*
1647          * Only head and tail can have led to the number of entries exceeding
1648          * IOV_MAX, so we can exceed it by the head and tail at most.  We need
1649          * to reduce the number of elements by `surplus_count`, so we merge that
1650          * many elements plus one into one element.
1651          */
1652         surplus_count = padded_niov - IOV_MAX;
1653         assert(surplus_count <= !!pad->head + !!pad->tail);
1654         collapse_count = surplus_count + 1;
1655 
1656         /*
1657          * Move the elements to collapse into `pad->pre_collapse_qiov`, then
1658          * advance `iov` (and associated variables) by those elements.
1659          */
1660         qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
1661         qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
1662                               collapse_count, iov_offset, SIZE_MAX);
1663         iov += collapse_count;
1664         iov_offset = 0;
1665         niov -= collapse_count;
1666         bytes -= pad->pre_collapse_qiov.size;
1667 
1668         /*
1669          * Construct the bounce buffer to match the length of the to-collapse
1670          * vector elements, and for write requests, initialize it with the data
1671          * from those elements.  Then add it to `pad->local_qiov`.
1672          */
1673         pad->collapse_len = pad->pre_collapse_qiov.size;
1674         pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
1675         if (pad->write) {
1676             qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
1677                               pad->collapse_bounce_buf, pad->collapse_len);
1678         }
1679         qemu_iovec_add(&pad->local_qiov,
1680                        pad->collapse_bounce_buf, pad->collapse_len);
1681     }
1682 
1683     qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);
1684 
1685     if (pad->tail) {
1686         qemu_iovec_add(&pad->local_qiov,
1687                        pad->buf + pad->buf_len - pad->tail, pad->tail);
1688     }
1689 
1690     assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
1691     return 0;
1692 }
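
/*
 * Editor's note: a hypothetical, standalone sketch of the collapse-count
 * arithmetic in bdrv_create_padded_qiov() above.  Merging N + 1 vector
 * elements into one bounce buffer removes exactly N entries, so
 * surplus_count + 1 elements are collapsed; e.g. niov == IOV_MAX with both
 * head and tail gives padded_niov == IOV_MAX + 2 and a collapse of 3.
 */
static int demo_collapse_count(int niov, bool has_head, bool has_tail)
{
    int padded_niov = !!has_head + niov + !!has_tail;

    if (padded_niov <= IOV_MAX) {
        return 0;   /* everything can simply be concatenated */
    }
    /* reduce the element count by the surplus */
    return (padded_niov - IOV_MAX) + 1;
}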
1693 
1694 /*
1695  * bdrv_pad_request
1696  *
1697  * Exchange request parameters with padded request if needed. Don't include RMW
1698  * read of padding, bdrv_padding_rmw_read() should be called separately if
1699  * needed.
1700  *
1701  * @write is true for write requests, false for read requests.
1702  *
1703  * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
1704  *  - on function start they represent original request
1705  *  - on failure or when padding is not needed they are unchanged
1706  *  - on success when padding is needed they represent padded request
1707  */
1708 static int bdrv_pad_request(BlockDriverState *bs,
1709                             QEMUIOVector **qiov, size_t *qiov_offset,
1710                             int64_t *offset, int64_t *bytes,
1711                             bool write,
1712                             BdrvRequestPadding *pad, bool *padded,
1713                             BdrvRequestFlags *flags)
1714 {
1715     int ret;
1716     struct iovec *sliced_iov;
1717     int sliced_niov;
1718     size_t sliced_head, sliced_tail;
1719 
1720     /* Should have been checked by the caller already */
1721     ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
1722     if (ret < 0) {
1723         return ret;
1724     }
1725 
1726     if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
1727         if (padded) {
1728             *padded = false;
1729         }
1730         return 0;
1731     }
1732 
1733     /*
1734      * For prefetching in stream_populate(), no qiov is passed along, because
1735      * only copy-on-read matters.
1736      */
1737     if (*qiov) {
1738         sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
1739                                       &sliced_head, &sliced_tail,
1740                                       &sliced_niov);
1741 
1742         /* Guaranteed by bdrv_check_request32() */
1743         assert(*bytes <= SIZE_MAX);
1744         ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
1745                                       sliced_head, *bytes);
1746         if (ret < 0) {
1747             bdrv_padding_finalize(pad);
1748             return ret;
1749         }
1750         *qiov = &pad->local_qiov;
1751         *qiov_offset = 0;
1752     }
1753 
1754     *bytes += pad->head + pad->tail;
1755     *offset -= pad->head;
1756     if (padded) {
1757         *padded = true;
1758     }
1759     if (flags) {
1760         /* Can't use optimization hint with bounce buffer */
1761         *flags &= ~BDRV_REQ_REGISTERED_BUF;
1762     }
1763 
1764     return 0;
1765 }
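
/*
 * Editor's note: a hypothetical caller sketch (not in the tree) showing the
 * in-out contract documented above.  On success with padding, *qiov,
 * *qiov_offset, *offset and *bytes describe the widened, aligned request;
 * finalizing pad afterwards is safe even when no padding was applied,
 * because bdrv_init_padding() zeroes *pad first.
 */
static int demo_pad_and_issue(BlockDriverState *bs, QEMUIOVector *qiov,
                              int64_t offset, int64_t bytes,
                              BdrvRequestFlags flags)
{
    BdrvRequestPadding pad;
    size_t qiov_offset = 0;
    int ret;

    ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes,
                           false, &pad, NULL, &flags);
    if (ret < 0) {
        return ret;
    }
    /* ... issue the (possibly widened) request with the updated values ... */
    bdrv_padding_finalize(&pad);
    return 0;
}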
1766 
1767 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1768     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1769     BdrvRequestFlags flags)
1770 {
1771     IO_CODE();
1772     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1773 }
1774 
1775 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1776     int64_t offset, int64_t bytes,
1777     QEMUIOVector *qiov, size_t qiov_offset,
1778     BdrvRequestFlags flags)
1779 {
1780     BlockDriverState *bs = child->bs;
1781     BdrvTrackedRequest req;
1782     BdrvRequestPadding pad;
1783     int ret;
1784     IO_CODE();
1785 
1786     trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
1787 
1788     if (!bdrv_co_is_inserted(bs)) {
1789         return -ENOMEDIUM;
1790     }
1791 
1792     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1793     if (ret < 0) {
1794         return ret;
1795     }
1796 
1797     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1798         /*
1799          * Aligning a zero-length request is nonsense. Even if a driver has a
1800          * special meaning for zero-length (like qcow2_co_pwritev_compressed_part),
1801          * we can't pass it to the driver due to request_alignment.
1802          *
1803          * Still, there is no reason to return an error if someone does an
1804          * unaligned zero-length read occasionally.
1805          */
1806         return 0;
1807     }
1808 
1809     bdrv_inc_in_flight(bs);
1810 
1811     /* Don't do copy-on-read if we read data before write operation */
1812     if (qatomic_read(&bs->copy_on_read)) {
1813         flags |= BDRV_REQ_COPY_ON_READ;
1814     }
1815 
1816     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
1817                            &pad, NULL, &flags);
1818     if (ret < 0) {
1819         goto fail;
1820     }
1821 
1822     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1823     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1824                               bs->bl.request_alignment,
1825                               qiov, qiov_offset, flags);
1826     tracked_request_end(&req);
1827     bdrv_padding_finalize(&pad);
1828 
1829 fail:
1830     bdrv_dec_in_flight(bs);
1831 
1832     return ret;
1833 }
1834 
1835 static int coroutine_fn GRAPH_RDLOCK
1836 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1837                          BdrvRequestFlags flags)
1838 {
1839     BlockDriver *drv = bs->drv;
1840     QEMUIOVector qiov;
1841     void *buf = NULL;
1842     int ret = 0;
1843     bool need_flush = false;
1844     int head = 0;
1845     int tail = 0;
1846 
1847     int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
1848                                             INT64_MAX);
1849     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1850                         bs->bl.request_alignment);
1851     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1852 
1853     assert_bdrv_graph_readable();
1854     bdrv_check_request(offset, bytes, &error_abort);
1855 
1856     if (!drv) {
1857         return -ENOMEDIUM;
1858     }
1859 
1860     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1861         return -ENOTSUP;
1862     }
1863 
1864     /* By definition there is no user buffer so this flag doesn't make sense */
1865     if (flags & BDRV_REQ_REGISTERED_BUF) {
1866         return -EINVAL;
1867     }
1868 
1869     /* If opened with discard=off we should never unmap. */
1870     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1871         flags &= ~BDRV_REQ_MAY_UNMAP;
1872     }
1873 
1874     /* Invalidate the cached block-status data range if this write overlaps */
1875     bdrv_bsc_invalidate_range(bs, offset, bytes);
1876 
1877     assert(alignment % bs->bl.request_alignment == 0);
1878     head = offset % alignment;
1879     tail = (offset + bytes) % alignment;
1880     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1881     assert(max_write_zeroes >= bs->bl.request_alignment);
1882 
1883     while (bytes > 0 && !ret) {
1884         int64_t num = bytes;
1885 
1886         /* Align request.  Block drivers can expect the "bulk" of the request
1887          * to be aligned, and that unaligned requests do not cross cluster
1888          * boundaries.
1889          */
1890         if (head) {
1891             /* Make a small request up to the first aligned sector. For
1892              * convenience, limit this request to max_transfer even if
1893              * we don't need to fall back to writes.  */
1894             num = MIN(MIN(bytes, max_transfer), alignment - head);
1895             head = (head + num) % alignment;
1896             assert(num < max_write_zeroes);
1897         } else if (tail && num > alignment) {
1898             /* Shorten the request to the last aligned sector.  */
1899             num -= tail;
1900         }
1901 
1902         /* limit request size */
1903         if (num > max_write_zeroes) {
1904             num = max_write_zeroes;
1905         }
1906 
1907         ret = -ENOTSUP;
1908         /* First try the efficient write zeroes operation */
1909         if (drv->bdrv_co_pwrite_zeroes) {
1910             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1911                                              flags & bs->supported_zero_flags);
1912             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1913                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1914                 need_flush = true;
1915             }
1916         } else {
1917             assert(!bs->supported_zero_flags);
1918         }
1919 
1920         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1921             /* Fall back to bounce buffer if write zeroes is unsupported */
1922             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1923 
1924             if ((flags & BDRV_REQ_FUA) &&
1925                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1926                 /* No need for bdrv_driver_pwrite() to do a fallback
1927                  * flush on each chunk; use just one at the end */
1928                 write_flags &= ~BDRV_REQ_FUA;
1929                 need_flush = true;
1930             }
1931             num = MIN(num, max_transfer);
1932             if (buf == NULL) {
1933                 buf = qemu_try_blockalign0(bs, num);
1934                 if (buf == NULL) {
1935                     ret = -ENOMEM;
1936                     goto fail;
1937                 }
1938             }
1939             qemu_iovec_init_buf(&qiov, buf, num);
1940 
1941             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1942 
1943             /* Keep the bounce buffer around if it is big enough for
1944              * all future requests.
1945              */
1946             if (num < max_transfer) {
1947                 qemu_vfree(buf);
1948                 buf = NULL;
1949             }
1950         }
1951 
1952         offset += num;
1953         bytes -= num;
1954     }
1955 
1956 fail:
1957     if (ret == 0 && need_flush) {
1958         ret = bdrv_co_flush(bs);
1959     }
1960     qemu_vfree(buf);
1961     return ret;
1962 }
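
/*
 * Editor's note: an illustrative, standalone trace of the request carving
 * performed by the loop above, ignoring the max_transfer cap on the head
 * chunk for brevity.  For offset = 700, bytes = 2000, alignment = 512 and a
 * large max_write_zeroes, the chunks are 324 bytes (700..1024), 1536
 * aligned bytes (1024..2560) and a 140-byte tail (2560..2700).
 */
static void demo_zero_carving(int64_t offset, int64_t bytes,
                              int64_t alignment, int64_t max_write_zeroes)
{
    int64_t head = offset % alignment;
    int64_t tail = (offset + bytes) % alignment;

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            num = MIN(bytes, alignment - head); /* up to the first boundary */
            head = (head + num) % alignment;
        } else if (tail && num > alignment) {
            num -= tail;                        /* stop at the last boundary */
        }
        num = MIN(num, max_write_zeroes);

        offset += num;
        bytes -= num;
    }
}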
1963 
1964 static inline int coroutine_fn GRAPH_RDLOCK
1965 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1966                           BdrvTrackedRequest *req, int flags)
1967 {
1968     BlockDriverState *bs = child->bs;
1969 
1970     bdrv_check_request(offset, bytes, &error_abort);
1971 
1972     if (bdrv_is_read_only(bs)) {
1973         return -EPERM;
1974     }
1975 
1976     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1977     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1978     assert(!(flags & ~BDRV_REQ_MASK));
1979     assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1980 
1981     if (flags & BDRV_REQ_SERIALISING) {
1982         QEMU_LOCK_GUARD(&bs->reqs_lock);
1983 
1984         tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1985 
1986         if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1987             return -EBUSY;
1988         }
1989 
1990         bdrv_wait_serialising_requests_locked(req);
1991     } else {
1992         bdrv_wait_serialising_requests(req);
1993     }
1994 
1995     assert(req->overlap_offset <= offset);
1996     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1997     assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
1998            child->perm & BLK_PERM_RESIZE);
1999 
2000     switch (req->type) {
2001     case BDRV_TRACKED_WRITE:
2002     case BDRV_TRACKED_DISCARD:
2003         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
2004             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
2005         } else {
2006             assert(child->perm & BLK_PERM_WRITE);
2007         }
2008         bdrv_write_threshold_check_write(bs, offset, bytes);
2009         return 0;
2010     case BDRV_TRACKED_TRUNCATE:
2011         assert(child->perm & BLK_PERM_RESIZE);
2012         return 0;
2013     default:
2014         abort();
2015     }
2016 }
2017 
2018 static inline void coroutine_fn GRAPH_RDLOCK
2019 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
2020                          BdrvTrackedRequest *req, int ret)
2021 {
2022     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
2023     BlockDriverState *bs = child->bs;
2024 
2025     bdrv_check_request(offset, bytes, &error_abort);
2026 
2027     qatomic_inc(&bs->write_gen);
2028 
2029     /*
2030      * Discard cannot extend the image, but in error handling cases, such as
2031      * when reverting a qcow2 cluster allocation, the discarded range can pass
2032      * the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
2033      * here. Instead, just skip it, since semantically a discard request
2034      * beyond EOF cannot expand the image anyway.
2035      */
2036     if (ret == 0 &&
2037         (req->type == BDRV_TRACKED_TRUNCATE ||
2038          end_sector > bs->total_sectors) &&
2039         req->type != BDRV_TRACKED_DISCARD) {
2040         bs->total_sectors = end_sector;
2041         bdrv_parent_cb_resize(bs);
2042         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
2043     }
2044     if (req->bytes) {
2045         switch (req->type) {
2046         case BDRV_TRACKED_WRITE:
2047             stat64_max(&bs->wr_highest_offset, offset + bytes);
2048             /* fall through, to set dirty bits */
2049         case BDRV_TRACKED_DISCARD:
2050             bdrv_set_dirty(bs, offset, bytes);
2051             break;
2052         default:
2053             break;
2054         }
2055     }
2056 }
2057 
2058 /*
2059  * Forwards an already correctly aligned write request to the BlockDriver,
2060  * after possibly fragmenting it.
2061  */
2062 static int coroutine_fn GRAPH_RDLOCK
2063 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
2064                      int64_t offset, int64_t bytes, int64_t align,
2065                      QEMUIOVector *qiov, size_t qiov_offset,
2066                      BdrvRequestFlags flags)
2067 {
2068     BlockDriverState *bs = child->bs;
2069     BlockDriver *drv = bs->drv;
2070     int ret;
2071 
2072     int64_t bytes_remaining = bytes;
2073     int max_transfer;
2074 
2075     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2076 
2077     if (!drv) {
2078         return -ENOMEDIUM;
2079     }
2080 
2081     if (bdrv_has_readonly_bitmaps(bs)) {
2082         return -EPERM;
2083     }
2084 
2085     assert(is_power_of_2(align));
2086     assert((offset & (align - 1)) == 0);
2087     assert((bytes & (align - 1)) == 0);
2088     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2089                                    align);
2090 
2091     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2092 
2093     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2094         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
2095         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2096         flags |= BDRV_REQ_ZERO_WRITE;
2097         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2098             flags |= BDRV_REQ_MAY_UNMAP;
2099         }
2100 
2101         /* Can't use optimization hint with bufferless zero write */
2102         flags &= ~BDRV_REQ_REGISTERED_BUF;
2103     }
2104 
2105     if (ret < 0) {
2106         /* Do nothing, write notifier decided to fail this request */
2107     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2108         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
2109         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
2110     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2111         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2112                                              qiov, qiov_offset);
2113     } else if (bytes <= max_transfer) {
2114         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
2115         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2116     } else {
2117         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
2118         while (bytes_remaining) {
2119             int num = MIN(bytes_remaining, max_transfer);
2120             int local_flags = flags;
2121 
2122             assert(num);
2123             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2124                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2125                 /* If FUA is going to be emulated by flush, we only
2126                  * need to flush on the last iteration */
2127                 local_flags &= ~BDRV_REQ_FUA;
2128             }
2129 
2130             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2131                                       num, qiov,
2132                                       qiov_offset + bytes - bytes_remaining,
2133                                       local_flags);
2134             if (ret < 0) {
2135                 break;
2136             }
2137             bytes_remaining -= num;
2138         }
2139     }
2140     bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
2141 
2142     if (ret >= 0) {
2143         ret = 0;
2144     }
2145     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2146 
2147     return ret;
2148 }
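
/*
 * Editor's note: a hypothetical helper spelling out the FUA rule used when
 * fragmenting the write above.  If FUA has to be emulated with a flush,
 * only the final chunk keeps the flag, so a single flush covers the whole
 * fragmented request instead of one flush per chunk.
 */
static BdrvRequestFlags demo_chunk_flags(BdrvRequestFlags flags,
                                         bool driver_supports_fua,
                                         bool is_last_chunk)
{
    if (!is_last_chunk && (flags & BDRV_REQ_FUA) && !driver_supports_fua) {
        flags &= ~BDRV_REQ_FUA;
    }
    return flags;
}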
2149 
2150 static int coroutine_fn GRAPH_RDLOCK
2151 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
2152                         BdrvRequestFlags flags, BdrvTrackedRequest *req)
2153 {
2154     BlockDriverState *bs = child->bs;
2155     QEMUIOVector local_qiov;
2156     uint64_t align = bs->bl.request_alignment;
2157     int ret = 0;
2158     bool padding;
2159     BdrvRequestPadding pad;
2160 
2161     /* This flag doesn't make sense for padding or zero writes */
2162     flags &= ~BDRV_REQ_REGISTERED_BUF;
2163 
2164     padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
2165     if (padding) {
2166         assert(!(flags & BDRV_REQ_NO_WAIT));
2167         bdrv_make_request_serialising(req, align);
2168 
2169         bdrv_padding_rmw_read(child, req, &pad, true);
2170 
2171         if (pad.head || pad.merge_reads) {
2172             int64_t aligned_offset = offset & ~(align - 1);
2173             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2174 
2175             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2176             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2177                                        align, &local_qiov, 0,
2178                                        flags & ~BDRV_REQ_ZERO_WRITE);
2179             if (ret < 0 || pad.merge_reads) {
2180                 /* Error or all work is done */
2181                 goto out;
2182             }
2183             offset += write_bytes - pad.head;
2184             bytes -= write_bytes - pad.head;
2185         }
2186     }
2187 
2188     assert(!bytes || (offset & (align - 1)) == 0);
2189     if (bytes >= align) {
2190         /* Write the aligned part in the middle. */
2191         int64_t aligned_bytes = bytes & ~(align - 1);
2192         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2193                                    NULL, 0, flags);
2194         if (ret < 0) {
2195             goto out;
2196         }
2197         bytes -= aligned_bytes;
2198         offset += aligned_bytes;
2199     }
2200 
2201     assert(!bytes || (offset & (align - 1)) == 0);
2202     if (bytes) {
2203         assert(align == pad.tail + bytes);
2204 
2205         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2206         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2207                                    &local_qiov, 0,
2208                                    flags & ~BDRV_REQ_ZERO_WRITE);
2209     }
2210 
2211 out:
2212     bdrv_padding_finalize(&pad);
2213 
2214     return ret;
2215 }
2216 
2217 /*
2218  * Handle a write request in coroutine context
2219  */
2220 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2221     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2222     BdrvRequestFlags flags)
2223 {
2224     IO_CODE();
2225     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2226 }
2227 
2228 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2229     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2230     BdrvRequestFlags flags)
2231 {
2232     BlockDriverState *bs = child->bs;
2233     BdrvTrackedRequest req;
2234     uint64_t align = bs->bl.request_alignment;
2235     BdrvRequestPadding pad;
2236     int ret;
2237     bool padded = false;
2238     IO_CODE();
2239 
2240     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2241 
2242     if (!bdrv_co_is_inserted(bs)) {
2243         return -ENOMEDIUM;
2244     }
2245 
2246     if (flags & BDRV_REQ_ZERO_WRITE) {
2247         ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2248     } else {
2249         ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2250     }
2251     if (ret < 0) {
2252         return ret;
2253     }
2254 
2255     /* If the request is misaligned then we can't make it efficient */
2256     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2257         !QEMU_IS_ALIGNED(offset | bytes, align))
2258     {
2259         return -ENOTSUP;
2260     }
2261 
2262     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2263         /*
2264          * Aligning a zero-length request is nonsense. Even if a driver has a
2265          * special meaning for zero-length (like qcow2_co_pwritev_compressed_part),
2266          * we can't pass it to the driver due to request_alignment.
2267          *
2268          * Still, there is no reason to return an error if someone does an
2269          * unaligned zero-length write occasionally.
2270          */
2271         return 0;
2272     }
2273 
2274     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2275         /*
2276          * Pad request for following read-modify-write cycle.
2277          * bdrv_co_do_zero_pwritev() does aligning by itself, so, we do
2278          * alignment only if there is no ZERO flag.
2279          */
2280         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
2281                                &pad, &padded, &flags);
2282         if (ret < 0) {
2283             return ret;
2284         }
2285     }
2286 
2287     bdrv_inc_in_flight(bs);
2288     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2289 
2290     if (flags & BDRV_REQ_ZERO_WRITE) {
2291         assert(!padded);
2292         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2293         goto out;
2294     }
2295 
2296     if (padded) {
2297         /*
2298          * Request was unaligned to request_alignment and therefore
2299          * padded.  We are going to do read-modify-write, and must
2300          * serialize the request to prevent interactions of the
2301          * widened region with other transactions.
2302          */
2303         assert(!(flags & BDRV_REQ_NO_WAIT));
2304         bdrv_make_request_serialising(&req, align);
2305         bdrv_padding_rmw_read(child, &req, &pad, false);
2306     }
2307 
2308     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2309                                qiov, qiov_offset, flags);
2310 
2311     bdrv_padding_finalize(&pad);
2312 
2313 out:
2314     tracked_request_end(&req);
2315     bdrv_dec_in_flight(bs);
2316 
2317     return ret;
2318 }
2319 
2320 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2321                                        int64_t bytes, BdrvRequestFlags flags)
2322 {
2323     IO_CODE();
2324     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2325     assert_bdrv_graph_readable();
2326 
2327     return bdrv_co_pwritev(child, offset, bytes, NULL,
2328                            BDRV_REQ_ZERO_WRITE | flags);
2329 }
2330 
2331 /*
2332  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
2333  */
2334 int bdrv_flush_all(void)
2335 {
2336     BdrvNextIterator it;
2337     BlockDriverState *bs = NULL;
2338     int result = 0;
2339 
2340     GLOBAL_STATE_CODE();
2341     GRAPH_RDLOCK_GUARD_MAINLOOP();
2342 
2343     /*
2344      * The bdrv queue is managed by record/replay;
2345      * creating a new flush request for stopping
2346      * the VM may break determinism.
2347      */
2348     if (replay_events_enabled()) {
2349         return result;
2350     }
2351 
2352     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2353         int ret = bdrv_flush(bs);
2354         if (ret < 0 && !result) {
2355             result = ret;
2356         }
2357     }
2358 
2359     return result;
2360 }
2361 
2362 /*
2363  * Returns the allocation status of the specified sectors.
2364  * Drivers not implementing the functionality are assumed to not support
2365  * backing files, hence all their sectors are reported as allocated.
2366  *
2367  * If 'want_zero' is true, the caller is querying for mapping
2368  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2369  * _ZERO where possible; otherwise, the result favors larger 'pnum',
2370  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2371  *
2372  * If 'offset' is beyond the end of the disk image the return value is
2373  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2374  *
2375  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2376  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2377  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2378  *
2379  * 'pnum' is set to the number of bytes (including and immediately
2380  * following the specified offset) that are easily known to be in the
2381  * same allocated/unallocated state.  Note that a second call starting
2382  * at the original offset plus returned pnum may have the same status.
2383  * The returned value is non-zero on success except at end-of-file.
2384  *
2385  * Returns negative errno on failure.  Otherwise, if the
2386  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2387  * set to the host mapping and BDS corresponding to the guest offset.
2388  */
2389 static int coroutine_fn GRAPH_RDLOCK
2390 bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
2391                         int64_t offset, int64_t bytes,
2392                         int64_t *pnum, int64_t *map, BlockDriverState **file)
2393 {
2394     int64_t total_size;
2395     int64_t n; /* bytes */
2396     int ret;
2397     int64_t local_map = 0;
2398     BlockDriverState *local_file = NULL;
2399     int64_t aligned_offset, aligned_bytes;
2400     uint32_t align;
2401     bool has_filtered_child;
2402 
2403     assert(pnum);
2404     assert_bdrv_graph_readable();
2405     *pnum = 0;
2406     total_size = bdrv_co_getlength(bs);
2407     if (total_size < 0) {
2408         ret = total_size;
2409         goto early_out;
2410     }
2411 
2412     if (offset >= total_size) {
2413         ret = BDRV_BLOCK_EOF;
2414         goto early_out;
2415     }
2416     if (!bytes) {
2417         ret = 0;
2418         goto early_out;
2419     }
2420 
2421     n = total_size - offset;
2422     if (n < bytes) {
2423         bytes = n;
2424     }
2425 
2426     /* Must be non-NULL or bdrv_co_getlength() would have failed */
2427     assert(bs->drv);
2428     has_filtered_child = bdrv_filter_child(bs);
2429     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2430         *pnum = bytes;
2431         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2432         if (offset + bytes == total_size) {
2433             ret |= BDRV_BLOCK_EOF;
2434         }
2435         if (bs->drv->protocol_name) {
2436             ret |= BDRV_BLOCK_OFFSET_VALID;
2437             local_map = offset;
2438             local_file = bs;
2439         }
2440         goto early_out;
2441     }
2442 
2443     bdrv_inc_in_flight(bs);
2444 
2445     /* Round out to request_alignment boundaries */
2446     align = bs->bl.request_alignment;
2447     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2448     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2449 
2450     if (bs->drv->bdrv_co_block_status) {
2451         /*
2452          * Use the block-status cache only for protocol nodes: Format
2453          * drivers are generally quick to inquire the status, but protocol
2454          * drivers often need to get information from outside of qemu, so
2455          * we do not have control over the actual implementation.  There
2456          * have been cases where inquiring the status took an unreasonably
2457          * long time, and we can do nothing in qemu to fix it.
2458          * This is especially problematic for images with large data areas,
2459          * because finding the few holes in them and giving them special
2460          * treatment does not gain much performance.  Therefore, we try to
2461          * cache the last-identified data region.
2462          *
2463          * Second, limiting ourselves to protocol nodes allows us to assume
2464          * the block status for data regions to be DATA | OFFSET_VALID, and
2465          * that the host offset is the same as the guest offset.
2466          *
2467          * Note that it is possible that external writers zero parts of
2468          * the cached regions without the cache being invalidated, and so
2469          * we may report zeroes as data.  This is not catastrophic,
2470          * however, because reporting zeroes as data is fine.
2471          */
2472         if (QLIST_EMPTY(&bs->children) &&
2473             bdrv_bsc_is_data(bs, aligned_offset, pnum))
2474         {
2475             ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2476             local_file = bs;
2477             local_map = aligned_offset;
2478         } else {
2479             ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2480                                                 aligned_bytes, pnum, &local_map,
2481                                                 &local_file);
2482 
2483             /*
2484              * Note that checking QLIST_EMPTY(&bs->children) is also done when
2485              * the cache is queried above.  Technically, we do not need to check
2486              * it here; the worst that can happen is that we fill the cache for
2487              * non-protocol nodes, and then it is never used.  However, filling
2488              * the cache requires an RCU update, so double check here to avoid
2489              * such an update if possible.
2490              *
2491              * Check want_zero, because we only want to update the cache when we
2492              * have accurate information about what is zero and what is data.
2493              */
2494             if (want_zero &&
2495                 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2496                 QLIST_EMPTY(&bs->children))
2497             {
2498                 /*
2499                  * When a protocol driver reports BLOCK_OFFSET_VALID, the
2500                  * returned local_map value must be the same as the offset we
2501                  * have passed (aligned_offset), and local_bs must be the node
2502                  * itself.
2503                  * Assert this, because we follow this rule when reading from
2504                  * the cache (see the `local_file = bs` and
2505                  * `local_map = aligned_offset` assignments above), and the
2506                  * result the cache delivers must be the same as the driver
2507                  * would deliver.
2508                  */
2509                 assert(local_file == bs);
2510                 assert(local_map == aligned_offset);
2511                 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2512             }
2513         }
2514     } else {
2515         /* Default code for filters */
2516 
2517         local_file = bdrv_filter_bs(bs);
2518         assert(local_file);
2519 
2520         *pnum = aligned_bytes;
2521         local_map = aligned_offset;
2522         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2523     }
2524     if (ret < 0) {
2525         *pnum = 0;
2526         goto out;
2527     }
2528 
2529     /*
2530      * The driver's result must be a non-zero multiple of request_alignment.
2531      * Clamp pnum and adjust map to original request.
2532      */
2533     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2534            align > offset - aligned_offset);
2535     if (ret & BDRV_BLOCK_RECURSE) {
2536         assert(ret & BDRV_BLOCK_DATA);
2537         assert(ret & BDRV_BLOCK_OFFSET_VALID);
2538         assert(!(ret & BDRV_BLOCK_ZERO));
2539     }
2540 
2541     *pnum -= offset - aligned_offset;
2542     if (*pnum > bytes) {
2543         *pnum = bytes;
2544     }
2545     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2546         local_map += offset - aligned_offset;
2547     }
2548 
2549     if (ret & BDRV_BLOCK_RAW) {
2550         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2551         ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
2552                                       *pnum, pnum, &local_map, &local_file);
2553         goto out;
2554     }
2555 
2556     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2557         ret |= BDRV_BLOCK_ALLOCATED;
2558     } else if (bs->drv->supports_backing) {
2559         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2560 
2561         if (!cow_bs) {
2562             ret |= BDRV_BLOCK_ZERO;
2563         } else if (want_zero) {
2564             int64_t size2 = bdrv_co_getlength(cow_bs);
2565 
2566             if (size2 >= 0 && offset >= size2) {
2567                 ret |= BDRV_BLOCK_ZERO;
2568             }
2569         }
2570     }
2571 
2572     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2573         local_file && local_file != bs &&
2574         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2575         (ret & BDRV_BLOCK_OFFSET_VALID)) {
2576         int64_t file_pnum;
2577         int ret2;
2578 
2579         ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
2580                                        *pnum, &file_pnum, NULL, NULL);
2581         if (ret2 >= 0) {
2582             /* Ignore errors.  This is just providing extra information; it
2583              * is useful but not necessary.
2584              */
2585             if (ret2 & BDRV_BLOCK_EOF &&
2586                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2587                 /*
2588                  * It is valid for the format block driver to read
2589                  * beyond the end of the underlying file's current
2590                  * size; such areas read as zero.
2591                  */
2592                 ret |= BDRV_BLOCK_ZERO;
2593             } else {
2594                 /* Limit request to the range reported by the protocol driver */
2595                 *pnum = file_pnum;
2596                 ret |= (ret2 & BDRV_BLOCK_ZERO);
2597             }
2598         }
2599 
2600         /*
2601          * Now that the recursive search was done, clear the flag. Otherwise,
2602          * with more complicated block graphs like snapshot-access ->
2603          * copy-before-write -> qcow2, where the return value will be propagated
2604          * further up to a parent bdrv_co_do_block_status() call, both the
2605          * BDRV_BLOCK_RECURSE and BDRV_BLOCK_ZERO flags would be set, which is
2606          * not allowed.
2607          */
2608         ret &= ~BDRV_BLOCK_RECURSE;
2609     }
2610 
2611 out:
2612     bdrv_dec_in_flight(bs);
2613     if (ret >= 0 && offset + *pnum == total_size) {
2614         ret |= BDRV_BLOCK_EOF;
2615     }
2616 early_out:
2617     if (file) {
2618         *file = local_file;
2619     }
2620     if (map) {
2621         *map = local_map;
2622     }
2623     return ret;
2624 }
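
/*
 * Editor's note: an illustrative, standalone sketch of the round-out and
 * clamp arithmetic above.  With align = 512, offset = 700, bytes = 100, the
 * driver is queried for [512, 1024) and must answer in multiples of 512;
 * the answer is then clipped back to the caller's range.  The map
 * adjustment is shown unconditionally here; the real code applies it only
 * when BDRV_BLOCK_OFFSET_VALID is set.
 */
static void demo_status_clamp(int64_t offset, int64_t bytes, int64_t align,
                              int64_t *pnum, int64_t *map)
{
    int64_t aligned_offset = QEMU_ALIGN_DOWN(offset, align);

    /* *pnum is assumed to be the driver's answer for [aligned_offset, ...) */
    *pnum -= offset - aligned_offset;   /* drop the rounded-down prefix */
    if (*pnum > bytes) {
        *pnum = bytes;                  /* clip to the caller's request */
    }
    *map += offset - aligned_offset;    /* point map at the original offset */
}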
2625 
2626 int coroutine_fn
2627 bdrv_co_common_block_status_above(BlockDriverState *bs,
2628                                   BlockDriverState *base,
2629                                   bool include_base,
2630                                   bool want_zero,
2631                                   int64_t offset,
2632                                   int64_t bytes,
2633                                   int64_t *pnum,
2634                                   int64_t *map,
2635                                   BlockDriverState **file,
2636                                   int *depth)
2637 {
2638     int ret;
2639     BlockDriverState *p;
2640     int64_t eof = 0;
2641     int dummy;
2642     IO_CODE();
2643 
2644     assert(!include_base || base); /* Can't include NULL base */
2645     assert_bdrv_graph_readable();
2646 
2647     if (!depth) {
2648         depth = &dummy;
2649     }
2650     *depth = 0;
2651 
2652     if (!include_base && bs == base) {
2653         *pnum = bytes;
2654         return 0;
2655     }
2656 
2657     ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
2658                                   map, file);
2659     ++*depth;
2660     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2661         return ret;
2662     }
2663 
2664     if (ret & BDRV_BLOCK_EOF) {
2665         eof = offset + *pnum;
2666     }
2667 
2668     assert(*pnum <= bytes);
2669     bytes = *pnum;
2670 
2671     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2672          p = bdrv_filter_or_cow_bs(p))
2673     {
2674         ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
2675                                       map, file);
2676         ++*depth;
2677         if (ret < 0) {
2678             return ret;
2679         }
2680         if (*pnum == 0) {
2681             /*
2682              * The top layer deferred to this layer, and because this layer is
2683              * short, any zeroes that we synthesize beyond EOF behave as if they
2684              * were allocated at this layer.
2685              *
2686              * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
2687              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2688              * below.
2689              */
2690             assert(ret & BDRV_BLOCK_EOF);
2691             *pnum = bytes;
2692             if (file) {
2693                 *file = p;
2694             }
2695             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2696             break;
2697         }
2698         if (ret & BDRV_BLOCK_ALLOCATED) {
2699             /*
2700              * We've found the node and the status, we must break.
2701              *
2702              * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
2703              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2704              * below.
2705              */
2706             ret &= ~BDRV_BLOCK_EOF;
2707             break;
2708         }
2709 
2710         if (p == base) {
2711             assert(include_base);
2712             break;
2713         }
2714 
2715         /*
2716          * OK, the [offset, offset + *pnum) region is unallocated on this
2717          * layer, so let's continue diving down the chain.
2718          */
2719         assert(*pnum <= bytes);
2720         bytes = *pnum;
2721     }
2722 
2723     if (offset + *pnum == eof) {
2724         ret |= BDRV_BLOCK_EOF;
2725     }
2726 
2727     return ret;
2728 }
2729 
2730 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2731                                             BlockDriverState *base,
2732                                             int64_t offset, int64_t bytes,
2733                                             int64_t *pnum, int64_t *map,
2734                                             BlockDriverState **file)
2735 {
2736     IO_CODE();
2737     return bdrv_co_common_block_status_above(bs, base, false, true, offset,
2738                                              bytes, pnum, map, file, NULL);
2739 }
2740 
2741 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
2742                                       int64_t bytes, int64_t *pnum,
2743                                       int64_t *map, BlockDriverState **file)
2744 {
2745     IO_CODE();
2746     return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2747                                       offset, bytes, pnum, map, file);
2748 }
2749 
2750 /*
2751  * Check @bs (and its backing chain) to see if the range defined
2752  * by @offset and @bytes is known to read as zeroes.
2753  * Return 1 if that is the case, 0 otherwise and -errno on error.
2754  * This test is meant to be fast rather than accurate so returning 0
2755  * does not guarantee non-zero data.
2756  */
2757 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2758                                       int64_t bytes)
2759 {
2760     int ret;
2761     int64_t pnum = bytes;
2762     IO_CODE();
2763 
2764     if (!bytes) {
2765         return 1;
2766     }
2767 
2768     ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2769                                             bytes, &pnum, NULL, NULL, NULL);
2770 
2771     if (ret < 0) {
2772         return ret;
2773     }
2774 
2775     return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2776 }
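
/*
 * Editor's note: a hypothetical usage sketch for the fast zero check above.
 * A positive result lets a copy loop skip a region known to read as zeroes;
 * 0 is inconclusive rather than a guarantee of data.
 */
static int coroutine_fn GRAPH_RDLOCK
demo_skip_known_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int ret = bdrv_co_is_zero_fast(bs, offset, bytes);

    if (ret < 0) {
        return ret;
    }
    if (ret) {
        return 0;   /* region reads as zeroes: nothing to copy */
    }
    /* ... fall back to reading and copying the data ... */
    return 0;
}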
2777 
2778 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
2779                                       int64_t bytes, int64_t *pnum)
2780 {
2781     int ret;
2782     int64_t dummy;
2783     IO_CODE();
2784 
2785     ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
2786                                             bytes, pnum ? pnum : &dummy, NULL,
2787                                             NULL, NULL);
2788     if (ret < 0) {
2789         return ret;
2790     }
2791     return !!(ret & BDRV_BLOCK_ALLOCATED);
2792 }
2793 
2794 /*
2795  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2796  *
2797  * Return a positive depth if (a prefix of) the given range is allocated
2798  * in any image between BASE and TOP (BASE is only included if include_base
2799  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2800  * BASE can be NULL to check if the given offset is allocated in any
2801  * image of the chain.  Return 0 otherwise, or negative errno on
2802  * failure.
2803  *
2804  * 'pnum' is set to the number of bytes (including and immediately
2805  * following the specified offset) that are known to be in the same
2806  * allocated/unallocated state.  Note that a subsequent call starting
2807  * at 'offset + *pnum' may return the same allocation status (in other
2808  * words, the result is not necessarily the maximum possible range);
2809  * but 'pnum' will only be 0 when end of file is reached.
2810  */
2811 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
2812                                             BlockDriverState *base,
2813                                             bool include_base, int64_t offset,
2814                                             int64_t bytes, int64_t *pnum)
2815 {
2816     int depth;
2817     int ret;
2818     IO_CODE();
2819 
2820     ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
2821                                             offset, bytes, pnum, NULL, NULL,
2822                                             &depth);
2823     if (ret < 0) {
2824         return ret;
2825     }
2826 
2827     if (ret & BDRV_BLOCK_ALLOCATED) {
2828         return depth;
2829     }
2830     return 0;
2831 }
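
/*
 * Editor's note: a hypothetical sketch of the depth semantics documented
 * above, for a chain base <- mid <- top with include_base false: a return
 * of 1 means the prefix is allocated in top, 2 means in mid, 0 means in
 * neither, and a negative value is an error.
 */
static int coroutine_fn GRAPH_RDLOCK
demo_first_allocating_layer(BlockDriverState *top, BlockDriverState *base,
                            int64_t offset, int64_t bytes)
{
    int64_t pnum;

    return bdrv_co_is_allocated_above(top, base, false, offset, bytes, &pnum);
}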
2832 
2833 int coroutine_fn
2834 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2835 {
2836     BlockDriver *drv = bs->drv;
2837     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2838     int ret;
2839     IO_CODE();
2840     assert_bdrv_graph_readable();
2841 
2842     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2843     if (ret < 0) {
2844         return ret;
2845     }
2846 
2847     if (!drv) {
2848         return -ENOMEDIUM;
2849     }
2850 
2851     bdrv_inc_in_flight(bs);
2852 
2853     if (drv->bdrv_co_load_vmstate) {
2854         ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2855     } else if (child_bs) {
2856         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2857     } else {
2858         ret = -ENOTSUP;
2859     }
2860 
2861     bdrv_dec_in_flight(bs);
2862 
2863     return ret;
2864 }
2865 
2866 int coroutine_fn
2867 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2868 {
2869     BlockDriver *drv = bs->drv;
2870     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2871     int ret;
2872     IO_CODE();
2873     assert_bdrv_graph_readable();
2874 
2875     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2876     if (ret < 0) {
2877         return ret;
2878     }
2879 
2880     if (!drv) {
2881         return -ENOMEDIUM;
2882     }
2883 
2884     bdrv_inc_in_flight(bs);
2885 
2886     if (drv->bdrv_co_save_vmstate) {
2887         ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2888     } else if (child_bs) {
2889         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2890     } else {
2891         ret = -ENOTSUP;
2892     }
2893 
2894     bdrv_dec_in_flight(bs);
2895 
2896     return ret;
2897 }
2898 
2899 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2900                       int64_t pos, int size)
2901 {
2902     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2903     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2904     IO_CODE();
2905 
2906     return ret < 0 ? ret : size;
2907 }
2908 
2909 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2910                       int64_t pos, int size)
2911 {
2912     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2913     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2914     IO_CODE();
2915 
2916     return ret < 0 ? ret : size;
2917 }
2918 
2919 /**************************************************************/
2920 /* async I/Os */
2921 
2922 /**
2923  * Synchronously cancels an acb. Must be called with the BQL held and the acb
2924  * must be processed with the BQL held too (IOThreads are not allowed).
2925  *
2926  * Use bdrv_aio_cancel_async() instead when possible.
2927  */
2928 void bdrv_aio_cancel(BlockAIOCB *acb)
2929 {
2930     GLOBAL_STATE_CODE();
2931     qemu_aio_ref(acb);
2932     bdrv_aio_cancel_async(acb);
2933     AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
2934     qemu_aio_unref(acb);
2935 }
2936 
2937 /* Async version of aio cancel. The caller is not blocked if the acb implements
2938  * cancel_async; otherwise we do nothing and let the request complete normally.
2939  * In either case the completion callback must be called. */
2940 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2941 {
2942     IO_CODE();
2943     if (acb->aiocb_info->cancel_async) {
2944         acb->aiocb_info->cancel_async(acb);
2945     }
2946 }
2947 
2948 /**************************************************************/
2949 /* Coroutine block device emulation */
2950 
2951 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2952 {
2953     BdrvChild *primary_child = bdrv_primary_child(bs);
2954     BdrvChild *child;
2955     int current_gen;
2956     int ret = 0;
2957     IO_CODE();
2958 
2959     assert_bdrv_graph_readable();
2960     bdrv_inc_in_flight(bs);
2961 
2962     if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
2963         bdrv_is_sg(bs)) {
2964         goto early_exit;
2965     }
2966 
2967     qemu_mutex_lock(&bs->reqs_lock);
2968     current_gen = qatomic_read(&bs->write_gen);
2969 
2970     /* Wait until any previous flushes are completed */
2971     while (bs->active_flush_req) {
2972         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2973     }
2974 
2975     /* Flushes reach this point in nondecreasing current_gen order.  */
2976     bs->active_flush_req = true;
2977     qemu_mutex_unlock(&bs->reqs_lock);
2978 
2979     /* Write back all layers by calling one driver function */
2980     if (bs->drv->bdrv_co_flush) {
2981         ret = bs->drv->bdrv_co_flush(bs);
2982         goto out;
2983     }
2984 
2985     /* Write back cached data to the OS even with cache=unsafe */
2986     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2987     if (bs->drv->bdrv_co_flush_to_os) {
2988         ret = bs->drv->bdrv_co_flush_to_os(bs);
2989         if (ret < 0) {
2990             goto out;
2991         }
2992     }
2993 
2994     /* But don't actually force it to the disk with cache=unsafe */
2995     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2996         goto flush_children;
2997     }
2998 
2999     /* Check if we really need to flush anything */
3000     if (bs->flushed_gen == current_gen) {
3001         goto flush_children;
3002     }
3003 
3004     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
3005     if (!bs->drv) {
3006         /* bs->drv->bdrv_co_flush() might have ejected the BDS
3007          * (even in case of apparent success) */
3008         ret = -ENOMEDIUM;
3009         goto out;
3010     }
3011     if (bs->drv->bdrv_co_flush_to_disk) {
3012         ret = bs->drv->bdrv_co_flush_to_disk(bs);
3013     } else if (bs->drv->bdrv_aio_flush) {
3014         BlockAIOCB *acb;
3015         CoroutineIOCompletion co = {
3016             .coroutine = qemu_coroutine_self(),
3017         };
3018 
3019         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3020         if (acb == NULL) {
3021             ret = -EIO;
3022         } else {
3023             qemu_coroutine_yield();
3024             ret = co.ret;
3025         }
3026     } else {
3027         /*
3028          * Some block drivers always operate in either writethrough or unsafe
3029          * mode and don't support bdrv_flush therefore. Usually qemu doesn't
3030          * know how the server works (because the behaviour is hardcoded or
3031          * depends on server-side configuration), so we can't ensure that
3032          * everything is safe on disk. Returning an error doesn't work because
3033          * that would break guests even if the server operates in writethrough
3034          * mode.
3035          *
3036          * Let's hope the user knows what he's doing.
3037          */
3038         ret = 0;
3039     }
3040 
3041     if (ret < 0) {
3042         goto out;
3043     }
3044 
3045     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
3046      * in the case of cache=unsafe, so there are no useless flushes.
3047      */
3048 flush_children:
3049     ret = 0;
3050     QLIST_FOREACH(child, &bs->children, next) {
3051         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3052             int this_child_ret = bdrv_co_flush(child->bs);
3053             if (!ret) {
3054                 ret = this_child_ret;
3055             }
3056         }
3057     }
3058 
3059 out:
3060     /* Notify any pending flushes that we have completed */
3061     if (ret == 0) {
3062         bs->flushed_gen = current_gen;
3063     }
3064 
3065     qemu_mutex_lock(&bs->reqs_lock);
3066     bs->active_flush_req = false;
3067     /* Return value is ignored - it's ok if wait queue is empty */
3068     qemu_co_queue_next(&bs->flush_queue);
3069     qemu_mutex_unlock(&bs->reqs_lock);
3070 
3071 early_exit:
3072     bdrv_dec_in_flight(bs);
3073     return ret;
3074 }
3075 
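/*
 * Illustrative driver-side sketch (the "example" driver is hypothetical) of
 * the hooks bdrv_co_flush() falls through above: a driver that flushes all
 * layers at once implements only .bdrv_co_flush; otherwise it can provide
 * the finer-grained .bdrv_co_flush_to_os and/or .bdrv_co_flush_to_disk.
 *
 *     static int coroutine_fn example_co_flush_to_disk(BlockDriverState *bs)
 *     {
 *         ExampleState *s = bs->opaque;
 *         return example_backend_sync(s);   // hypothetical backend call
 *     }
 *
 *     static BlockDriver bdrv_example = {
 *         .format_name           = "example",
 *         .bdrv_co_flush_to_disk = example_co_flush_to_disk,
 *     };
 */
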
int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
                                  int64_t bytes)
{
    BdrvTrackedRequest req;
    int ret;
    int64_t max_pdiscard;
    int head, tail, align;
    BlockDriverState *bs = child->bs;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
        return -ENOMEDIUM;
    }

    if (bdrv_has_readonly_bitmaps(bs)) {
        return -EPERM;
    }

    ret = bdrv_check_request(offset, bytes, NULL);
    if (ret < 0) {
        return ret;
    }

    /* Do nothing if disabled.  */
    if (!(bs->open_flags & BDRV_O_UNMAP)) {
        return 0;
    }

    if (!bs->drv->bdrv_co_pdiscard) {
        return 0;
    }

    /* Invalidate the cached block-status data range if this discard overlaps */
    bdrv_bsc_invalidate_range(bs, offset, bytes);

    /*
     * Discard is advisory, but some devices track and coalesce
     * unaligned requests, so we must pass everything down rather than
     * round here.  Still, most devices reject unaligned requests with
     * -EINVAL or -ENOTSUP, so we must fragment the request accordingly.
     */
    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
    assert(align % bs->bl.request_alignment == 0);
    head = offset % align;
    tail = (offset + bytes) % align;

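    /*
     * Worked example with illustrative numbers: for align = 64 KiB,
     * offset = 32 KiB and bytes = 192 KiB, we get head = 32 KiB and
     * tail = 32 KiB.  The loop below first issues a 32 KiB request up to
     * the 64 KiB boundary (after which head becomes 0), then the aligned
     * middle (possibly split further by max_pdiscard), and finally the
     * 32 KiB tail, so only the head and tail fragments are unaligned.
     */
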
    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);

    ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
    if (ret < 0) {
        goto out;
    }

    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                   align);
    assert(max_pdiscard >= bs->bl.request_alignment);

    while (bytes > 0) {
        int64_t num = bytes;

        if (head) {
            /* Make small requests to get to alignment boundaries. */
            num = MIN(bytes, align - head);
            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
                num %= bs->bl.request_alignment;
            }
            head = (head + num) % align;
            assert(num < max_pdiscard);
        } else if (tail) {
            if (num > align) {
                /* Shorten the request to the last aligned cluster.  */
                num -= tail;
            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
                       tail > bs->bl.request_alignment) {
                tail %= bs->bl.request_alignment;
                num -= tail;
            }
        }
        /* limit request size */
        if (num > max_pdiscard) {
            num = max_pdiscard;
        }

        if (!bs->drv) {
            ret = -ENOMEDIUM;
            goto out;
        }

        ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
        if (ret && ret != -ENOTSUP) {
            if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) {
                /* Silently skip rejected unaligned head/tail requests */
            } else {
                goto out; /* bail out */
            }
        }

        offset += num;
        bytes -= num;
    }
    ret = 0;
out:
    bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);
    return ret;
}

int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
        .coroutine = qemu_coroutine_self(),
    };
    BlockAIOCB *acb;
    IO_CODE();
    assert_bdrv_graph_readable();

    bdrv_inc_in_flight(bs);
    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
        co.ret = -ENOTSUP;
        goto out;
    }

    if (drv->bdrv_co_ioctl) {
        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
    } else {
        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
        if (!acb) {
            co.ret = -ENOTSUP;
            goto out;
        }
        qemu_coroutine_yield();
    }
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

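/*
 * A note on the pattern above (also used by bdrv_co_flush()):
 * bdrv_co_io_em_complete() is the AIO completion callback that stores the
 * result in the CoroutineIOCompletion and re-enters the yielded coroutine,
 * so emulating a coroutine interface on top of an AIO driver hook reduces to
 * "submit, yield, read co.ret".  A hypothetical coroutine caller:
 *
 *     ret = bdrv_co_ioctl(bs, req, buf);
 *     if (ret == -ENOTSUP) {
 *         // driver implements neither bdrv_co_ioctl nor bdrv_aio_ioctl
 *     }
 */
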
int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
                        unsigned int *nr_zones,
                        BlockZoneDescriptor *zones)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
        int64_t offset, int64_t len)
{
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
                        QEMUIOVector *qiov,
                        BdrvRequestFlags flags)
{
    int ret;
    BlockDriver *drv = bs->drv;
    CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
    };
    IO_CODE();

    ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
    if (ret < 0) {
        return ret;
    }

    bdrv_inc_in_flight(bs);
    if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
        co.ret = -ENOTSUP;
        goto out;
    }
    co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
out:
    bdrv_dec_in_flight(bs);
    return co.ret;
}

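/*
 * Illustrative use of the zoned helpers from coroutine context (offsets and
 * lengths are examples only): reset one zone's write pointer, then query its
 * descriptor.  All three helpers return -ENOTSUP when the driver lacks the
 * hook or the device is not zoned (bs->bl.zoned == BLK_Z_NONE).
 *
 *     unsigned int nr_zones = 1;
 *     BlockZoneDescriptor zone;
 *
 *     ret = bdrv_co_zone_mgmt(bs, BLK_ZO_RESET, zone_start, zone_len);
 *     if (ret == 0) {
 *         ret = bdrv_co_zone_report(bs, zone_start, &nr_zones, &zone);
 *     }
 */
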
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return qemu_memalign(bdrv_opt_mem_align(bs), size);
}

void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
    IO_CODE();
    return memset(qemu_blockalign(bs, size), 0, size);
}

void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
    size_t align = bdrv_opt_mem_align(bs);
    IO_CODE();

    /* Ensure that NULL is never returned on success */
    assert(align > 0);
    if (size == 0) {
        size = align;
    }

    return qemu_try_memalign(align, size);
}

void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
    void *mem = qemu_try_blockalign(bs, size);
    IO_CODE();

    if (mem) {
        memset(mem, 0, size);
    }

    return mem;
}

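/*
 * Typical usage sketch (the surrounding logic is hypothetical): allocate a
 * bounce buffer that satisfies the node's memory alignment, tolerate
 * allocation failure, and release it with qemu_vfree().
 *
 *     uint8_t *buf = qemu_try_blockalign(bs, len);
 *     if (buf == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */
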
/* Helper that undoes bdrv_register_buf() when it fails partway through */
static void GRAPH_RDLOCK
bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
                           BdrvChild *final_child)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(child, &bs->children, next) {
        if (child == final_child) {
            break;
        }

        bdrv_unregister_buf(child->bs, host, size);
    }

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
}

bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
                       Error **errp)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_register_buf) {
        if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
            return false;
        }
    }
    QLIST_FOREACH(child, &bs->children, next) {
        if (!bdrv_register_buf(child->bs, host, size, errp)) {
            bdrv_register_buf_rollback(bs, host, size, child);
            return false;
        }
    }
    return true;
}

void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BdrvChild *child;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (bs->drv && bs->drv->bdrv_unregister_buf) {
        bs->drv->bdrv_unregister_buf(bs, host, size);
    }
    QLIST_FOREACH(child, &bs->children, next) {
        bdrv_unregister_buf(child->bs, host, size);
    }
}

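/*
 * Illustrative pairing for a hypothetical caller: registration walks the
 * whole subtree below @bs and either succeeds everywhere or is rolled back
 * completely, so only a successful registration ever needs to be undone.
 *
 *     if (bdrv_register_buf(bs, host, size, errp)) {
 *         ... issue I/O from the registered region ...
 *         bdrv_unregister_buf(bs, host, size);
 *     }
 */
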
static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
        BdrvChild *src, int64_t src_offset, BdrvChild *dst,
        int64_t dst_offset, int64_t bytes,
        BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
        bool recurse_src)
{
    BdrvTrackedRequest req;
    int ret;
    assert_bdrv_graph_readable();

    /* TODO We can support BDRV_REQ_NO_FALLBACK here */
    assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
    assert(!(read_flags & BDRV_REQ_NO_WAIT));
    assert(!(write_flags & BDRV_REQ_NO_WAIT));

    if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }
    if (write_flags & BDRV_REQ_ZERO_WRITE) {
        return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
    }

    if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
        return -ENOMEDIUM;
    }
    ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
    if (ret) {
        return ret;
    }

    if (!src->bs->drv->bdrv_co_copy_range_from
        || !dst->bs->drv->bdrv_co_copy_range_to
        || src->bs->encrypted || dst->bs->encrypted) {
        return -ENOTSUP;
    }

    if (recurse_src) {
        bdrv_inc_in_flight(src->bs);
        tracked_request_begin(&req, src->bs, src_offset, bytes,
                              BDRV_TRACKED_READ);

        /* BDRV_REQ_SERIALISING is only for write operation */
        assert(!(read_flags & BDRV_REQ_SERIALISING));
        bdrv_wait_serialising_requests(&req);

        ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
                                                    src, src_offset,
                                                    dst, dst_offset,
                                                    bytes,
                                                    read_flags, write_flags);

        tracked_request_end(&req);
        bdrv_dec_in_flight(src->bs);
    } else {
        bdrv_inc_in_flight(dst->bs);
        tracked_request_begin(&req, dst->bs, dst_offset, bytes,
                              BDRV_TRACKED_WRITE);
        ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
                                        write_flags);
        if (!ret) {
            ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
                                                      src, src_offset,
                                                      dst, dst_offset,
                                                      bytes,
                                                      read_flags, write_flags);
        }
        bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
        tracked_request_end(&req);
        bdrv_dec_in_flight(dst->bs);
    }

    return ret;
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
                                         BdrvChild *dst, int64_t dst_offset,
                                         int64_t bytes,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
                                  read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, true);
}

/* Copy range from @src to @dst.
 *
 * See the comment of bdrv_co_copy_range for the parameter and return value
 * semantics. */
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
                                       BdrvChild *dst, int64_t dst_offset,
                                       int64_t bytes,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();
    trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
                                read_flags, write_flags);
    return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
                                       bytes, read_flags, write_flags, false);
}

int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
                                    BdrvChild *dst, int64_t dst_offset,
                                    int64_t bytes, BdrvRequestFlags read_flags,
                                    BdrvRequestFlags write_flags)
{
    IO_CODE();
    assert_bdrv_graph_readable();

    return bdrv_co_copy_range_from(src, src_offset,
                                   dst, dst_offset,
                                   bytes, read_flags, write_flags);
}

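/*
 * Hypothetical caller sketch: copy offload is best-effort, so callers
 * typically fall back to a bounce buffer when -ENOTSUP is returned (e.g.
 * because a driver lacks the copy_range hooks or a node is encrypted, as
 * checked in bdrv_co_copy_range_internal()).
 *
 *     ret = bdrv_co_copy_range(src, src_off, dst, dst_off, bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // fall back to bdrv_co_pread() + bdrv_co_pwrite() via a buffer
 *     }
 */
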
static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs)
{
    BdrvChild *c;

    assert_bdrv_graph_readable();

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c->klass->resize) {
            c->klass->resize(c);
        }
    }
}

/**
 * Truncate file to 'offset' bytes (needed only for file protocols)
 *
 * If 'exact' is true, the file must be resized to exactly the given
 * 'offset'.  Otherwise, it is sufficient for the node to be at least
 * 'offset' bytes in length.
 */
int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp)
{
    BlockDriverState *bs = child->bs;
    BdrvChild *filtered, *backing;
    BlockDriver *drv = bs->drv;
    BdrvTrackedRequest req;
    int64_t old_size, new_bytes;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
    if (!drv) {
        error_setg(errp, "No medium inserted");
        return -ENOMEDIUM;
    }
    if (offset < 0) {
        error_setg(errp, "Image size cannot be negative");
        return -EINVAL;
    }

    ret = bdrv_check_request(offset, 0, errp);
    if (ret < 0) {
        return ret;
    }

    old_size = bdrv_co_getlength(bs);
    if (old_size < 0) {
        error_setg_errno(errp, -old_size, "Failed to get old image size");
        return old_size;
    }

    if (bdrv_is_read_only(bs)) {
        error_setg(errp, "Image is read-only");
        return -EACCES;
    }

    if (offset > old_size) {
        new_bytes = offset - old_size;
    } else {
        new_bytes = 0;
    }

    bdrv_inc_in_flight(bs);
    tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
                          BDRV_TRACKED_TRUNCATE);

    /* If we are growing the image and potentially using preallocation for the
     * new area, we need to make sure that no write requests are made to it
     * concurrently or they might be overwritten by preallocation. */
    if (new_bytes) {
        bdrv_make_request_serialising(&req, 1);
    }
    ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
                                    0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to prepare request for truncation");
        goto out;
    }

    filtered = bdrv_filter_child(bs);
    backing = bdrv_cow_child(bs);

    /*
     * If the image has a backing file that is large enough that it would
     * provide data for the new area, we cannot leave it unallocated because
     * then the backing file content would become visible. Instead, zero-fill
     * the new area.
     *
     * Note that if the image has a backing file, but was opened without the
     * backing file, taking care of keeping things consistent with that backing
     * file is the user's responsibility.
     */
    if (new_bytes && backing) {
        int64_t backing_len;

        backing_len = bdrv_co_getlength(backing->bs);
        if (backing_len < 0) {
            ret = backing_len;
            error_setg_errno(errp, -ret, "Could not get backing file size");
            goto out;
        }

        if (backing_len > old_size) {
            flags |= BDRV_REQ_ZERO_WRITE;
        }
    }

    if (drv->bdrv_co_truncate) {
        if (flags & ~bs->supported_truncate_flags) {
            error_setg(errp, "Block driver does not support requested flags");
            ret = -ENOTSUP;
            goto out;
        }
        ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
    } else if (filtered) {
        ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
    } else {
        error_setg(errp, "Image format driver does not support resize");
        ret = -ENOTSUP;
        goto out;
    }
    if (ret < 0) {
        goto out;
    }

    ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not refresh total sector count");
    } else {
        offset = bs->total_sectors * BDRV_SECTOR_SIZE;
    }
    /*
     * It's possible that truncation succeeded but
     * bdrv_co_refresh_total_sectors() failed, but the latter doesn't affect
     * how we should finish the request.
     * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
     */
    bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);

out:
    tracked_request_end(&req);
    bdrv_dec_in_flight(bs);

    return ret;
}

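/*
 * Worked example for the zero-fill decision in bdrv_co_truncate() (sizes are
 * illustrative): growing a 1 GiB image to 3 GiB while its backing file is
 * 2 GiB long makes backing_len > old_size, so the new area [1 GiB, 3 GiB) is
 * written with BDRV_REQ_ZERO_WRITE; otherwise stale backing-file data in
 * [1 GiB, 2 GiB) would show through the grown image.
 */
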
void bdrv_cancel_in_flight(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    if (!bs || !bs->drv) {
        return;
    }

    if (bs->drv->bdrv_cancel_in_flight) {
        bs->drv->bdrv_cancel_in_flight(bs);
    }
}

int coroutine_fn
bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, size_t qiov_offset)
{
    BlockDriverState *bs = child->bs;
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_preadv_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_snapshot_block_status(BlockDriverState *bs,
                              bool want_zero, int64_t offset, int64_t bytes,
                              int64_t *pnum, int64_t *map,
                              BlockDriverState **file)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_snapshot_block_status) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
                                             pnum, map, file);
    bdrv_dec_in_flight(bs);

    return ret;
}

int coroutine_fn
bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BlockDriver *drv = bs->drv;
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!drv->bdrv_co_pdiscard_snapshot) {
        return -ENOTSUP;
    }

    bdrv_inc_in_flight(bs);
    ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
    bdrv_dec_in_flight(bs);

    return ret;
}
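
/*
 * The three snapshot-access wrappers above share one shape: return
 * -ENOMEDIUM / -ENOTSUP early, then bracket the driver hook with
 * bdrv_inc_in_flight()/bdrv_dec_in_flight() so that drain operations see the
 * request.  A hypothetical driver opts in by filling the matching hooks:
 *
 *     static BlockDriver bdrv_example_snapshot_access = {
 *         .format_name               = "example-snapshot-access",
 *         .bdrv_co_preadv_snapshot   = example_co_preadv_snapshot,
 *         .bdrv_co_pdiscard_snapshot = example_co_pdiscard_snapshot,
 *     };
 */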