xref: /qemu/block/io.c (revision 86c54a3a418e462e67444ac4db25b2757fd62079)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "system/block-backend.h"
28 #include "block/aio-wait.h"
29 #include "block/blockjob.h"
30 #include "block/blockjob_int.h"
31 #include "block/block_int.h"
32 #include "block/coroutines.h"
33 #include "block/dirty-bitmap.h"
34 #include "block/write-threshold.h"
35 #include "qemu/cutils.h"
36 #include "qemu/memalign.h"
37 #include "qapi/error.h"
38 #include "qemu/error-report.h"
39 #include "qemu/main-loop.h"
40 #include "system/replay.h"
41 #include "qemu/units.h"
42 
43 /* Maximum bounce buffer for copy-on-read and write zeroes, in bytes (16 MiB) */
44 #define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
45 
46 /* Maximum read size for checking if data reads as zero, in bytes */
47 #define MAX_ZERO_CHECK_BUFFER (128 * KiB)
48 
49 static void coroutine_fn GRAPH_RDLOCK
50 bdrv_parent_cb_resize(BlockDriverState *bs);
51 
52 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
53     int64_t offset, int64_t bytes, BdrvRequestFlags flags);
54 
55 static void GRAPH_RDLOCK
56 bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
57 {
58     BdrvChild *c, *next;
59     IO_OR_GS_CODE();
60     assert_bdrv_graph_readable();
61 
62     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
63         if (c == ignore) {
64             continue;
65         }
66         bdrv_parent_drained_begin_single(c);
67     }
68 }
69 
70 void bdrv_parent_drained_end_single(BdrvChild *c)
71 {
72     GLOBAL_STATE_CODE();
73 
74     assert(c->quiesced_parent);
75     c->quiesced_parent = false;
76 
77     if (c->klass->drained_end) {
78         c->klass->drained_end(c);
79     }
80 }
81 
82 static void GRAPH_RDLOCK
83 bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
84 {
85     BdrvChild *c;
86     IO_OR_GS_CODE();
87     assert_bdrv_graph_readable();
88 
89     QLIST_FOREACH(c, &bs->parents, next_parent) {
90         if (c == ignore) {
91             continue;
92         }
93         bdrv_parent_drained_end_single(c);
94     }
95 }
96 
97 bool bdrv_parent_drained_poll_single(BdrvChild *c)
98 {
99     IO_OR_GS_CODE();
100 
101     if (c->klass->drained_poll) {
102         return c->klass->drained_poll(c);
103     }
104     return false;
105 }
106 
107 static bool GRAPH_RDLOCK
108 bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
109                          bool ignore_bds_parents)
110 {
111     BdrvChild *c, *next;
112     bool busy = false;
113     IO_OR_GS_CODE();
114     assert_bdrv_graph_readable();
115 
116     QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
117         if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
118             continue;
119         }
120         busy |= bdrv_parent_drained_poll_single(c);
121     }
122 
123     return busy;
124 }
125 
126 void bdrv_parent_drained_begin_single(BdrvChild *c)
127 {
128     GLOBAL_STATE_CODE();
129 
130     assert(!c->quiesced_parent);
131     c->quiesced_parent = true;
132 
133     if (c->klass->drained_begin) {
134         /* called with rdlock taken, but it doesn't really need it. */
135         c->klass->drained_begin(c);
136     }
137 }
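
/*
 * For illustration, a hypothetical parent implementation (not an in-tree
 * BdrvChildClass) would participate in the drained-section protocol above
 * roughly like this:
 *
 *     static void my_drained_begin(BdrvChild *child)
 *     {
 *         MyParent *s = child->opaque;
 *         s->quiesced = true;        // stop submitting new requests
 *     }
 *
 *     static bool my_drained_poll(BdrvChild *child)
 *     {
 *         MyParent *s = child->opaque;
 *         return s->in_flight > 0;   // true while requests are still pending
 *     }
 *
 *     static void my_drained_end(BdrvChild *child)
 *     {
 *         MyParent *s = child->opaque;
 *         s->quiesced = false;       // resume submitting requests
 *     }
 *
 * All three BdrvChildClass callbacks are optional; a missing drained_poll is
 * treated as "not busy" by bdrv_parent_drained_poll_single().
 */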
138 
139 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
140 {
141     dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
142                                   src->pdiscard_alignment);
143     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
144     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
145     dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
146                                         src->max_hw_transfer);
147     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
148                                  src->opt_mem_alignment);
149     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
150                                  src->min_mem_alignment);
151     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
152     dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
153 }
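
/*
 * Worked example for the merge above: for the MIN_NON_ZERO fields, 0 means
 * "no limit", so merging max_transfer == 0 (unlimited) with 64 KiB yields
 * 64 KiB, and merging 128 KiB with 64 KiB yields the stricter 64 KiB.  The
 * MAX-merged fields (alignments, opt_transfer) instead keep the largest
 * requirement seen across the children.
 */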
154 
155 typedef struct BdrvRefreshLimitsState {
156     BlockDriverState *bs;
157     BlockLimits old_bl;
158 } BdrvRefreshLimitsState;
159 
160 static void bdrv_refresh_limits_abort(void *opaque)
161 {
162     BdrvRefreshLimitsState *s = opaque;
163 
164     s->bs->bl = s->old_bl;
165 }
166 
167 static TransactionActionDrv bdrv_refresh_limits_drv = {
168     .abort = bdrv_refresh_limits_abort,
169     .clean = g_free,
170 };
171 
172 /* @tran is allowed to be NULL, in this case no rollback is possible. */
173 void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
174 {
175     ERRP_GUARD();
176     BlockDriver *drv = bs->drv;
177     BdrvChild *c;
178     bool have_limits;
179 
180     GLOBAL_STATE_CODE();
181 
182     if (tran) {
183         BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
184         *s = (BdrvRefreshLimitsState) {
185             .bs = bs,
186             .old_bl = bs->bl,
187         };
188         tran_add(tran, &bdrv_refresh_limits_drv, s);
189     }
190 
191     memset(&bs->bl, 0, sizeof(bs->bl));
192 
193     if (!drv) {
194         return;
195     }
196 
197     /* Default alignment based on whether driver has byte interface */
198     bs->bl.request_alignment = (drv->bdrv_co_preadv ||
199                                 drv->bdrv_aio_preadv ||
200                                 drv->bdrv_co_preadv_part) ? 1 : 512;
201 
202     /* Take some limits from the children as a default */
203     have_limits = false;
204     QLIST_FOREACH(c, &bs->children, next) {
205         if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
206         {
207             bdrv_merge_limits(&bs->bl, &c->bs->bl);
208             have_limits = true;
209         }
210 
211         if (c->role & BDRV_CHILD_FILTERED) {
212             bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
213         }
214     }
215 
216     if (!have_limits) {
217         bs->bl.min_mem_alignment = 512;
218         bs->bl.opt_mem_alignment = qemu_real_host_page_size();
219 
220         /* Safe default since most protocols use readv()/writev()/etc */
221         bs->bl.max_iov = IOV_MAX;
222     }
223 
224     /* Then let the driver override it */
225     if (drv->bdrv_refresh_limits) {
226         drv->bdrv_refresh_limits(bs, errp);
227         if (*errp) {
228             return;
229         }
230     }
231 
232     if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
233         error_setg(errp, "Driver requires too large request alignment");
234     }
235 }
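
/*
 * As a sketch, a hypothetical driver (not an in-tree one) could override the
 * child-derived defaults from its .bdrv_refresh_limits callback:
 *
 *     static void my_refresh_limits(BlockDriverState *bs, Error **errp)
 *     {
 *         bs->bl.request_alignment = 4096;   // 4k-native backing device
 *         bs->bl.max_transfer = 1 * MiB;     // fragment larger requests
 *     }
 *
 * Fields the callback leaves untouched keep the defaults computed above from
 * the children.
 */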
236 
237 /**
238  * The copy-on-read flag is actually a reference count so multiple users may
239  * use the feature without worrying about clobbering its previous state.
240  * Copy-on-read stays enabled until all users have called to disable it.
241  */
242 void bdrv_enable_copy_on_read(BlockDriverState *bs)
243 {
244     IO_CODE();
245     qatomic_inc(&bs->copy_on_read);
246 }
247 
248 void bdrv_disable_copy_on_read(BlockDriverState *bs)
249 {
250     int old = qatomic_fetch_dec(&bs->copy_on_read);
251     IO_CODE();
252     assert(old >= 1);
253 }
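
/*
 * Usage sketch: since the flag is a reference count, independent users just
 * pair the calls and cannot clobber each other's state:
 *
 *     bdrv_enable_copy_on_read(bs);    // user A
 *     bdrv_enable_copy_on_read(bs);    // user B
 *     bdrv_disable_copy_on_read(bs);   // user B is done, COR stays enabled
 *     bdrv_disable_copy_on_read(bs);   // user A is done, COR now disabled
 */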
254 
255 typedef struct {
256     Coroutine *co;
257     BlockDriverState *bs;
258     bool done;
259     bool begin;
260     bool poll;
261     BdrvChild *parent;
262 } BdrvCoDrainData;
263 
264 /* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
265 bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
266                      bool ignore_bds_parents)
267 {
268     GLOBAL_STATE_CODE();
269 
270     if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
271         return true;
272     }
273 
274     if (qatomic_read(&bs->in_flight)) {
275         return true;
276     }
277 
278     return false;
279 }
280 
281 static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
282                                       BdrvChild *ignore_parent)
283 {
284     GLOBAL_STATE_CODE();
285     GRAPH_RDLOCK_GUARD_MAINLOOP();
286 
287     return bdrv_drain_poll(bs, ignore_parent, false);
288 }
289 
290 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
291                                   bool poll);
292 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);
293 
294 static void bdrv_co_drain_bh_cb(void *opaque)
295 {
296     BdrvCoDrainData *data = opaque;
297     Coroutine *co = data->co;
298     BlockDriverState *bs = data->bs;
299 
300     if (bs) {
301         bdrv_dec_in_flight(bs);
302         if (data->begin) {
303             bdrv_do_drained_begin(bs, data->parent, data->poll);
304         } else {
305             assert(!data->poll);
306             bdrv_do_drained_end(bs, data->parent);
307         }
308     } else {
309         assert(data->begin);
310         bdrv_drain_all_begin();
311     }
312 
313     data->done = true;
314     aio_co_wake(co);
315 }
316 
317 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
318                                                 bool begin,
319                                                 BdrvChild *parent,
320                                                 bool poll)
321 {
322     BdrvCoDrainData data;
323     Coroutine *self = qemu_coroutine_self();
324 
325     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
326      * other coroutines run if they were queued by aio_co_enter(). */
327 
328     assert(qemu_in_coroutine());
329     data = (BdrvCoDrainData) {
330         .co = self,
331         .bs = bs,
332         .done = false,
333         .begin = begin,
334         .parent = parent,
335         .poll = poll,
336     };
337 
338     if (bs) {
339         bdrv_inc_in_flight(bs);
340     }
341 
342     replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
343                                      bdrv_co_drain_bh_cb, &data);
344 
345     qemu_coroutine_yield();
346     /* If we are resumed from some other event (such as an aio completion or a
347      * timer callback), it is a bug in the caller that should be fixed. */
348     assert(data.done);
349 }
350 
351 static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
352                                   bool poll)
353 {
354     IO_OR_GS_CODE();
355 
356     if (qemu_in_coroutine()) {
357         bdrv_co_yield_to_drain(bs, true, parent, poll);
358         return;
359     }
360 
361     GLOBAL_STATE_CODE();
362 
363     /* Stop things in parent-to-child order */
364     if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
365         GRAPH_RDLOCK_GUARD_MAINLOOP();
366         bdrv_parent_drained_begin(bs, parent);
367         if (bs->drv && bs->drv->bdrv_drain_begin) {
368             bs->drv->bdrv_drain_begin(bs);
369         }
370     }
371 
372     /*
373      * Wait for drained requests to finish.
374      *
375      * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
376      * call is needed so things in this AioContext can make progress even
377      * though we don't return to the main AioContext loop - this automatically
378      * includes other nodes in the same AioContext and therefore all child
379      * nodes.
380      */
381     if (poll) {
382         BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
383     }
384 }
385 
386 void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
387 {
388     bdrv_do_drained_begin(bs, parent, false);
389 }
390 
391 void coroutine_mixed_fn
392 bdrv_drained_begin(BlockDriverState *bs)
393 {
394     IO_OR_GS_CODE();
395     bdrv_do_drained_begin(bs, NULL, true);
396 }
397 
398 /**
399  * This function does not poll, nor must any of its recursively called
400  * functions.
401  */
402 static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
403 {
404     int old_quiesce_counter;
405 
406     IO_OR_GS_CODE();
407 
408     if (qemu_in_coroutine()) {
409         bdrv_co_yield_to_drain(bs, false, parent, false);
410         return;
411     }
412 
413     /* At this point, we should always be running in the main loop. */
414     GLOBAL_STATE_CODE();
415     assert(bs->quiesce_counter > 0);
417 
418     /* Re-enable things in child-to-parent order */
419     old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
420     if (old_quiesce_counter == 1) {
421         GRAPH_RDLOCK_GUARD_MAINLOOP();
422         if (bs->drv && bs->drv->bdrv_drain_end) {
423             bs->drv->bdrv_drain_end(bs);
424         }
425         bdrv_parent_drained_end(bs, parent);
426     }
427 }
428 
429 void bdrv_drained_end(BlockDriverState *bs)
430 {
431     IO_OR_GS_CODE();
432     bdrv_do_drained_end(bs, NULL);
433 }
434 
435 void bdrv_drain(BlockDriverState *bs)
436 {
437     IO_OR_GS_CODE();
438     bdrv_drained_begin(bs);
439     bdrv_drained_end(bs);
440 }
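
/*
 * Typical drained-section usage (sketch): quiesce I/O around an operation
 * that must not race with requests, such as a graph change:
 *
 *     bdrv_drained_begin(bs);
 *     // ... no new requests can be submitted, none are in flight ...
 *     bdrv_drained_end(bs);
 *
 * bdrv_drain() above is just this begin/end pair with no work in between:
 * it waits for all in-flight requests and then resumes the node.
 */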
441 
442 static void bdrv_drain_assert_idle(BlockDriverState *bs)
443 {
444     BdrvChild *child, *next;
445     GLOBAL_STATE_CODE();
446     GRAPH_RDLOCK_GUARD_MAINLOOP();
447 
448     assert(qatomic_read(&bs->in_flight) == 0);
449     QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
450         bdrv_drain_assert_idle(child->bs);
451     }
452 }
453 
454 unsigned int bdrv_drain_all_count = 0;
455 
456 static bool bdrv_drain_all_poll(void)
457 {
458     BlockDriverState *bs = NULL;
459     bool result = false;
460 
461     GLOBAL_STATE_CODE();
462     GRAPH_RDLOCK_GUARD_MAINLOOP();
463 
464     /*
465      * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
466      * so iterating bdrv_next_all_states() is safe.
467      */
468     while ((bs = bdrv_next_all_states(bs))) {
469         result |= bdrv_drain_poll(bs, NULL, true);
470     }
471 
472     return result;
473 }
474 
475 /*
476  * Wait for pending requests to complete across all BlockDriverStates
477  *
478  * This function does not flush data to disk, use bdrv_flush_all() for that
479  * after calling this function.
480  *
481  * This pauses all block jobs and disables external clients. It must
482  * be paired with bdrv_drain_all_end().
483  *
484  * NOTE: no new block jobs or BlockDriverStates can be created between
485  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
486  */
487 void bdrv_drain_all_begin_nopoll(void)
488 {
489     BlockDriverState *bs = NULL;
490     GLOBAL_STATE_CODE();
491 
492     /*
493      * The bdrv queue is managed by record/replay;
494      * waiting for the in-flight I/O requests to
495      * finish could block forever
496      */
497     if (replay_events_enabled()) {
498         return;
499     }
500 
501     /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
502      * loop AioContext, so make sure we're in the main context. */
503     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
504     assert(bdrv_drain_all_count < INT_MAX);
505     bdrv_drain_all_count++;
506 
507     /* Quiesce all nodes, without polling in-flight requests yet. The graph
508      * cannot change during this loop. */
509     while ((bs = bdrv_next_all_states(bs))) {
510         bdrv_do_drained_begin(bs, NULL, false);
511     }
512 }
513 
514 void coroutine_mixed_fn bdrv_drain_all_begin(void)
515 {
516     BlockDriverState *bs = NULL;
517 
518     if (qemu_in_coroutine()) {
519         bdrv_co_yield_to_drain(NULL, true, NULL, true);
520         return;
521     }
522 
523     /*
524      * The bdrv queue is managed by record/replay;
525      * waiting for the in-flight I/O requests to
526      * finish could block forever
527      */
528     if (replay_events_enabled()) {
529         return;
530     }
531 
532     bdrv_drain_all_begin_nopoll();
533 
534     /* Now poll the in-flight requests */
535     AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());
536 
537     while ((bs = bdrv_next_all_states(bs))) {
538         bdrv_drain_assert_idle(bs);
539     }
540 }
541 
542 void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
543 {
544     GLOBAL_STATE_CODE();
545 
546     g_assert(bs->quiesce_counter > 0);
547     g_assert(!bs->refcnt);
548 
549     while (bs->quiesce_counter) {
550         bdrv_do_drained_end(bs, NULL);
551     }
552 }
553 
554 void bdrv_drain_all_end(void)
555 {
556     BlockDriverState *bs = NULL;
557     GLOBAL_STATE_CODE();
558 
559     /*
560      * The bdrv queue is managed by record/replay;
561      * waiting for the in-flight I/O requests to
562      * finish could block forever
563      */
564     if (replay_events_enabled()) {
565         return;
566     }
567 
568     while ((bs = bdrv_next_all_states(bs))) {
569         bdrv_do_drained_end(bs, NULL);
570     }
571 
572     assert(qemu_get_current_aio_context() == qemu_get_aio_context());
573     assert(bdrv_drain_all_count > 0);
574     bdrv_drain_all_count--;
575 }
576 
577 void bdrv_drain_all(void)
578 {
579     GLOBAL_STATE_CODE();
580     bdrv_drain_all_begin();
581     bdrv_drain_all_end();
582 }
583 
584 /**
585  * Remove an active request from the tracked requests list
586  *
587  * This function should be called when a tracked request is completing.
588  */
589 static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
590 {
591     if (req->serialising) {
592         qatomic_dec(&req->bs->serialising_in_flight);
593     }
594 
595     qemu_mutex_lock(&req->bs->reqs_lock);
596     QLIST_REMOVE(req, list);
597     qemu_mutex_unlock(&req->bs->reqs_lock);
598 
599     /*
600      * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
601      * anymore because the request has been removed from the list, so it's safe
602      * to restart the queue outside reqs_lock to minimize the critical section.
603      */
604     qemu_co_queue_restart_all(&req->wait_queue);
605 }
606 
607 /**
608  * Add an active request to the tracked requests list
609  */
610 static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
611                                                BlockDriverState *bs,
612                                                int64_t offset,
613                                                int64_t bytes,
614                                                enum BdrvTrackedRequestType type)
615 {
616     bdrv_check_request(offset, bytes, &error_abort);
617 
618     *req = (BdrvTrackedRequest){
619         .bs = bs,
620         .offset         = offset,
621         .bytes          = bytes,
622         .type           = type,
623         .co             = qemu_coroutine_self(),
624         .serialising    = false,
625         .overlap_offset = offset,
626         .overlap_bytes  = bytes,
627     };
628 
629     qemu_co_queue_init(&req->wait_queue);
630 
631     qemu_mutex_lock(&bs->reqs_lock);
632     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
633     qemu_mutex_unlock(&bs->reqs_lock);
634 }
635 
636 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
637                                      int64_t offset, int64_t bytes)
638 {
639     bdrv_check_request(offset, bytes, &error_abort);
640 
641     /*        aaaa   bbbb */
642     if (offset >= req->overlap_offset + req->overlap_bytes) {
643         return false;
644     }
645     /* bbbb   aaaa        */
646     if (req->overlap_offset >= offset + bytes) {
647         return false;
648     }
649     return true;
650 }
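
/*
 * Worked example for the half-open interval test above: a request covering
 * [4096, 8192) overlaps [4095, 4097), but overlaps neither [0, 4096) nor
 * [8192, 12288).
 */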
651 
652 /* Called with self->bs->reqs_lock held */
653 static coroutine_fn BdrvTrackedRequest *
654 bdrv_find_conflicting_request(BdrvTrackedRequest *self)
655 {
656     BdrvTrackedRequest *req;
657 
658     QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
659         if (req == self || (!req->serialising && !self->serialising)) {
660             continue;
661         }
662         if (tracked_request_overlaps(req, self->overlap_offset,
663                                      self->overlap_bytes))
664         {
665             /*
666              * Hitting this means there was a reentrant request, for
667              * example, a block driver issuing nested requests.  This must
668              * never happen since it means deadlock.
669              */
670             assert(qemu_coroutine_self() != req->co);
671 
672             /*
673              * If the request is already (indirectly) waiting for us, or
674              * will wait for us as soon as it wakes up, then just go on
675              * (instead of producing a deadlock in the former case).
676              */
677             if (!req->waiting_for) {
678                 return req;
679             }
680         }
681     }
682 
683     return NULL;
684 }
685 
686 /* Called with self->bs->reqs_lock held */
687 static void coroutine_fn
688 bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
689 {
690     BdrvTrackedRequest *req;
691 
692     while ((req = bdrv_find_conflicting_request(self))) {
693         self->waiting_for = req;
694         qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
695         self->waiting_for = NULL;
696     }
697 }
698 
699 /* Called with req->bs->reqs_lock held */
700 static void tracked_request_set_serialising(BdrvTrackedRequest *req,
701                                             uint64_t align)
702 {
703     int64_t overlap_offset = req->offset & ~(align - 1);
704     int64_t overlap_bytes =
705         ROUND_UP(req->offset + req->bytes, align) - overlap_offset;
706 
707     bdrv_check_request(req->offset, req->bytes, &error_abort);
708 
709     if (!req->serialising) {
710         qatomic_inc(&req->bs->serialising_in_flight);
711         req->serialising = true;
712     }
713 
714     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
715     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
716 }
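
/*
 * Worked example for the alignment arithmetic above, with align = 4096:
 * a request at offset 5000 with 1000 bytes gets
 * overlap_offset = 5000 & ~4095 = 4096 and
 * overlap_bytes = ROUND_UP(6000, 4096) - 4096 = 4096, i.e. the serialised
 * region grows to the whole aligned block [4096, 8192).
 */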
717 
718 /**
719  * Return the tracked request on @bs for the current coroutine, or
720  * NULL if there is none.
721  */
722 BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
723 {
724     BdrvTrackedRequest *req;
725     Coroutine *self = qemu_coroutine_self();
726     IO_CODE();
727 
728     QLIST_FOREACH(req, &bs->tracked_requests, list) {
729         if (req->co == self) {
730             return req;
731         }
732     }
733 
734     return NULL;
735 }
736 
737 /**
738  * Round a region to subcluster (if supported) or cluster boundaries
739  */
740 void coroutine_fn GRAPH_RDLOCK
741 bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
742                           int64_t *align_offset, int64_t *align_bytes)
743 {
744     BlockDriverInfo bdi;
745     IO_CODE();
746     if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
747         *align_offset = offset;
748         *align_bytes = bytes;
749     } else {
750         int64_t c = bdi.subcluster_size;
751         *align_offset = QEMU_ALIGN_DOWN(offset, c);
752         *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
753     }
754 }
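
/*
 * Example of the rounding above with a 4096-byte subcluster: offset = 5000
 * and bytes = 1000 become *align_offset = 4096 and
 * *align_bytes = QEMU_ALIGN_UP(904 + 1000, 4096) = 4096, i.e. the whole
 * subcluster touched by the original region.
 */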
755 
756 static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
757 {
758     BlockDriverInfo bdi;
759     int ret;
760 
761     ret = bdrv_co_get_info(bs, &bdi);
762     if (ret < 0 || bdi.cluster_size == 0) {
763         return bs->bl.request_alignment;
764     } else {
765         return bdi.cluster_size;
766     }
767 }
768 
769 void bdrv_inc_in_flight(BlockDriverState *bs)
770 {
771     IO_CODE();
772     qatomic_inc(&bs->in_flight);
773 }
774 
775 void bdrv_wakeup(BlockDriverState *bs)
776 {
777     IO_CODE();
778     aio_wait_kick();
779 }
780 
781 void bdrv_dec_in_flight(BlockDriverState *bs)
782 {
783     IO_CODE();
784     qatomic_dec(&bs->in_flight);
785     bdrv_wakeup(bs);
786 }
787 
788 static void coroutine_fn
789 bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
790 {
791     BlockDriverState *bs = self->bs;
792 
793     if (!qatomic_read(&bs->serialising_in_flight)) {
794         return;
795     }
796 
797     qemu_mutex_lock(&bs->reqs_lock);
798     bdrv_wait_serialising_requests_locked(self);
799     qemu_mutex_unlock(&bs->reqs_lock);
800 }
801 
802 void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
803                                                 uint64_t align)
804 {
805     IO_CODE();
806 
807     qemu_mutex_lock(&req->bs->reqs_lock);
808 
809     tracked_request_set_serialising(req, align);
810     bdrv_wait_serialising_requests_locked(req);
811 
812     qemu_mutex_unlock(&req->bs->reqs_lock);
813 }
814 
815 int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
816                             QEMUIOVector *qiov, size_t qiov_offset,
817                             Error **errp)
818 {
819     /*
820      * Check generic offset/bytes correctness
821      */
822 
823     if (offset < 0) {
824         error_setg(errp, "offset is negative: %" PRIi64, offset);
825         return -EIO;
826     }
827 
828     if (bytes < 0) {
829         error_setg(errp, "bytes is negative: %" PRIi64, bytes);
830         return -EIO;
831     }
832 
833     if (bytes > BDRV_MAX_LENGTH) {
834         error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
835                    bytes, BDRV_MAX_LENGTH);
836         return -EIO;
837     }
838 
839     if (offset > BDRV_MAX_LENGTH) {
840         error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
841                    offset, BDRV_MAX_LENGTH);
842         return -EIO;
843     }
844 
845     if (offset > BDRV_MAX_LENGTH - bytes) {
846         error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
847                    "exceeds maximum(%" PRIi64 ")", offset, bytes,
848                    BDRV_MAX_LENGTH);
849         return -EIO;
850     }
851 
852     if (!qiov) {
853         return 0;
854     }
855 
856     /*
857      * Check qiov and qiov_offset
858      */
859 
860     if (qiov_offset > qiov->size) {
861         error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
862                    qiov_offset, qiov->size);
863         return -EIO;
864     }
865 
866     if (bytes > qiov->size - qiov_offset) {
867         error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
868                    "vector size(%zu)", bytes, qiov_offset, qiov->size);
869         return -EIO;
870     }
871 
872     return 0;
873 }
874 
875 int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
876 {
877     return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
878 }
879 
880 static int bdrv_check_request32(int64_t offset, int64_t bytes,
881                                 QEMUIOVector *qiov, size_t qiov_offset)
882 {
883     int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
884     if (ret < 0) {
885         return ret;
886     }
887 
888     if (bytes > BDRV_REQUEST_MAX_BYTES) {
889         return -EIO;
890     }
891 
892     return 0;
893 }
894 
895 /*
896  * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
897  * The operation is sped up by checking the block status and only writing
898  * zeroes to regions that do not already read as zeroes. Optional
899  * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
900  * BDRV_REQ_FUA).
901  *
902  * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
903  */
904 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
905 {
906     int ret;
907     int64_t target_size, bytes, offset = 0;
908     BlockDriverState *bs = child->bs;
909     IO_CODE();
910 
911     target_size = bdrv_getlength(bs);
912     if (target_size < 0) {
913         return target_size;
914     }
915 
916     for (;;) {
917         bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
918         if (bytes <= 0) {
919             return 0;
920         }
921         ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
922         if (ret < 0) {
923             return ret;
924         }
925         if (ret & BDRV_BLOCK_ZERO) {
926             offset += bytes;
927             continue;
928         }
929         ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
930         if (ret < 0) {
931             return ret;
932         }
933         offset += bytes;
934     }
935 }
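
/*
 * Usage sketch: zero an entire device, allowing the driver to punch holes
 * where possible:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("failed to zero device: %s", strerror(-ret));
 *     }
 */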
936 
937 /*
938  * Writes to the file and ensures that no writes are reordered across this
939  * request (acts as a barrier)
940  *
941  * Returns 0 on success, -errno in error cases.
942  */
943 int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
944                                      int64_t bytes, const void *buf,
945                                      BdrvRequestFlags flags)
946 {
947     int ret;
948     IO_CODE();
949     assert_bdrv_graph_readable();
950 
951     ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
952     if (ret < 0) {
953         return ret;
954     }
955 
956     ret = bdrv_co_flush(child->bs);
957     if (ret < 0) {
958         return ret;
959     }
960 
961     return 0;
962 }
963 
964 typedef struct CoroutineIOCompletion {
965     Coroutine *coroutine;
966     int ret;
967 } CoroutineIOCompletion;
968 
969 static void bdrv_co_io_em_complete(void *opaque, int ret)
970 {
971     CoroutineIOCompletion *co = opaque;
972 
973     co->ret = ret;
974     aio_co_wake(co->coroutine);
975 }
976 
977 static int coroutine_fn GRAPH_RDLOCK
978 bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
979                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
980 {
981     BlockDriver *drv = bs->drv;
982     int64_t sector_num;
983     unsigned int nb_sectors;
984     QEMUIOVector local_qiov;
985     int ret;
986     assert_bdrv_graph_readable();
987 
988     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
989     assert(!(flags & ~bs->supported_read_flags));
990 
991     if (!drv) {
992         return -ENOMEDIUM;
993     }
994 
995     if (drv->bdrv_co_preadv_part) {
996         return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
997                                         flags);
998     }
999 
1000     if (qiov_offset > 0 || bytes != qiov->size) {
1001         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1002         qiov = &local_qiov;
1003     }
1004 
1005     if (drv->bdrv_co_preadv) {
1006         ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1007         goto out;
1008     }
1009 
1010     if (drv->bdrv_aio_preadv) {
1011         BlockAIOCB *acb;
1012         CoroutineIOCompletion co = {
1013             .coroutine = qemu_coroutine_self(),
1014         };
1015 
1016         acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1017                                    bdrv_co_io_em_complete, &co);
1018         if (acb == NULL) {
1019             ret = -EIO;
1020             goto out;
1021         } else {
1022             qemu_coroutine_yield();
1023             ret = co.ret;
1024             goto out;
1025         }
1026     }
1027 
1028     sector_num = offset >> BDRV_SECTOR_BITS;
1029     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1030 
1031     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1032     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1033     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1034     assert(drv->bdrv_co_readv);
1035 
1036     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1037 
1038 out:
1039     if (qiov == &local_qiov) {
1040         qemu_iovec_destroy(&local_qiov);
1041     }
1042 
1043     return ret;
1044 }
1045 
1046 static int coroutine_fn GRAPH_RDLOCK
1047 bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1048                     QEMUIOVector *qiov, size_t qiov_offset,
1049                     BdrvRequestFlags flags)
1050 {
1051     BlockDriver *drv = bs->drv;
1052     bool emulate_fua = false;
1053     int64_t sector_num;
1054     unsigned int nb_sectors;
1055     QEMUIOVector local_qiov;
1056     int ret;
1057     assert_bdrv_graph_readable();
1058 
1059     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1060 
1061     if (!drv) {
1062         return -ENOMEDIUM;
1063     }
1064 
1065     if (bs->open_flags & BDRV_O_NO_FLUSH) {
1066         flags &= ~BDRV_REQ_FUA;
1067     }
1068 
1069     if ((flags & BDRV_REQ_FUA) &&
1070         (~bs->supported_write_flags & BDRV_REQ_FUA)) {
1071         flags &= ~BDRV_REQ_FUA;
1072         emulate_fua = true;
1073     }
1074 
1075     flags &= bs->supported_write_flags;
1076 
1077     if (drv->bdrv_co_pwritev_part) {
1078         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1079                                         flags);
1080         goto emulate_flags;
1081     }
1082 
1083     if (qiov_offset > 0 || bytes != qiov->size) {
1084         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1085         qiov = &local_qiov;
1086     }
1087 
1088     if (drv->bdrv_co_pwritev) {
1089         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
1090         goto emulate_flags;
1091     }
1092 
1093     if (drv->bdrv_aio_pwritev) {
1094         BlockAIOCB *acb;
1095         CoroutineIOCompletion co = {
1096             .coroutine = qemu_coroutine_self(),
1097         };
1098 
1099         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
1100                                     bdrv_co_io_em_complete, &co);
1101         if (acb == NULL) {
1102             ret = -EIO;
1103         } else {
1104             qemu_coroutine_yield();
1105             ret = co.ret;
1106         }
1107         goto emulate_flags;
1108     }
1109 
1110     sector_num = offset >> BDRV_SECTOR_BITS;
1111     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1112 
1113     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1114     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1115     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1116 
1117     assert(drv->bdrv_co_writev);
1118     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
1119 
1120 emulate_flags:
1121     if (ret == 0 && emulate_fua) {
1122         ret = bdrv_co_flush(bs);
1123     }
1124 
1125     if (qiov == &local_qiov) {
1126         qemu_iovec_destroy(&local_qiov);
1127     }
1128 
1129     return ret;
1130 }
1131 
1132 static int coroutine_fn GRAPH_RDLOCK
1133 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
1134                                int64_t bytes, QEMUIOVector *qiov,
1135                                size_t qiov_offset)
1136 {
1137     BlockDriver *drv = bs->drv;
1138     QEMUIOVector local_qiov;
1139     int ret;
1140     assert_bdrv_graph_readable();
1141 
1142     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1143 
1144     if (!drv) {
1145         return -ENOMEDIUM;
1146     }
1147 
1148     if (!block_driver_can_compress(drv)) {
1149         return -ENOTSUP;
1150     }
1151 
1152     if (drv->bdrv_co_pwritev_compressed_part) {
1153         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1154                                                     qiov, qiov_offset);
1155     }
1156 
1157     if (qiov_offset == 0) {
1158         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1159     }
1160 
1161     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1162     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1163     qemu_iovec_destroy(&local_qiov);
1164 
1165     return ret;
1166 }
1167 
1168 static int coroutine_fn GRAPH_RDLOCK
1169 bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
1170                          QEMUIOVector *qiov, size_t qiov_offset, int flags)
1171 {
1172     BlockDriverState *bs = child->bs;
1173 
1174     /* Perform I/O through a temporary buffer so that users who scribble over
1175      * their read buffer while the operation is in progress do not end up
1176      * modifying the image file.  This is critical for zero-copy guest I/O
1177      * where anything might happen inside guest memory.
1178      */
1179     void *bounce_buffer = NULL;
1180 
1181     BlockDriver *drv = bs->drv;
1182     int64_t align_offset;
1183     int64_t align_bytes;
1184     int64_t skip_bytes;
1185     int ret;
1186     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1187                                     BDRV_REQUEST_MAX_BYTES);
1188     int64_t progress = 0;
1189     bool skip_write;
1190 
1191     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1192 
1193     if (!drv) {
1194         return -ENOMEDIUM;
1195     }
1196 
1197     /*
1198      * Do not write anything when the BDS is inactive.  That is not
1199      * allowed, and it would not help.
1200      */
1201     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
1202 
1203     /* FIXME We cannot require callers to have write permissions when all they
1204      * are doing is a read request. If we did things right, write permissions
1205      * would be obtained anyway, but internally by the copy-on-read code. As
1206      * long as it is implemented here rather than in a separate filter driver,
1207      * the copy-on-read code doesn't have its own BdrvChild, however, for which
1208      * it could request permissions. Therefore we have to bypass the permission
1209      * system for the moment. */
1210     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1211 
1212     /* Cover entire cluster so no additional backing file I/O is required when
1213      * allocating cluster in the image file.  Note that this value may exceed
1214      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1215      * is one reason we loop rather than doing it all at once.
1216      */
1217     bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
1218     skip_bytes = offset - align_offset;
1219 
1220     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1221                                    align_offset, align_bytes);
1222 
1223     while (align_bytes) {
1224         int64_t pnum;
1225 
1226         if (skip_write) {
1227             ret = 1; /* "already allocated", so nothing will be copied */
1228             pnum = MIN(align_bytes, max_transfer);
1229         } else {
1230             ret = bdrv_co_is_allocated(bs, align_offset,
1231                                        MIN(align_bytes, max_transfer), &pnum);
1232             if (ret < 0) {
1233                 /*
1234                  * Safe to treat errors in querying allocation as if
1235                  * unallocated; we'll probably fail again soon on the
1236                  * read, but at least that will set a decent errno.
1237                  */
1238                 pnum = MIN(align_bytes, max_transfer);
1239             }
1240 
1241             /* Stop at EOF if the image ends in the middle of the cluster */
1242             if (ret == 0 && pnum == 0) {
1243                 assert(progress >= bytes);
1244                 break;
1245             }
1246 
1247             assert(skip_bytes < pnum);
1248         }
1249 
1250         if (ret <= 0) {
1251             QEMUIOVector local_qiov;
1252 
1253             /* Must copy-on-read; use the bounce buffer */
1254             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1255             if (!bounce_buffer) {
1256                 int64_t max_we_need = MAX(pnum, align_bytes - pnum);
1257                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
1258                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
1259 
1260                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
1261                 if (!bounce_buffer) {
1262                     ret = -ENOMEM;
1263                     goto err;
1264                 }
1265             }
1266             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1267 
1268             ret = bdrv_driver_preadv(bs, align_offset, pnum,
1269                                      &local_qiov, 0, 0);
1270             if (ret < 0) {
1271                 goto err;
1272             }
1273 
1274             bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
1275             if (drv->bdrv_co_pwrite_zeroes &&
1276                 buffer_is_zero(bounce_buffer, pnum)) {
1277                 /* FIXME: Should we (perhaps conditionally) be setting
1278                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1279                  * that still correctly reads as zero? */
1280                 ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
1281                                                BDRV_REQ_WRITE_UNCHANGED);
1282             } else {
1283                 /* This does not change the data on the disk, it is not
1284                  * necessary to flush even in cache=writethrough mode.
1285                  */
1286                 ret = bdrv_driver_pwritev(bs, align_offset, pnum,
1287                                           &local_qiov, 0,
1288                                           BDRV_REQ_WRITE_UNCHANGED);
1289             }
1290 
1291             if (ret < 0) {
1292                 /* It might be okay to ignore write errors for guest
1293                  * requests.  If this is a deliberate copy-on-read
1294                  * then we don't want to ignore the error.  Simply
1295                  * report it in all cases.
1296                  */
1297                 goto err;
1298             }
1299 
1300             if (!(flags & BDRV_REQ_PREFETCH)) {
1301                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
1302                                     bounce_buffer + skip_bytes,
1303                                     MIN(pnum - skip_bytes, bytes - progress));
1304             }
1305         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1306             /* Read directly into the destination */
1307             ret = bdrv_driver_preadv(bs, offset + progress,
1308                                      MIN(pnum - skip_bytes, bytes - progress),
1309                                      qiov, qiov_offset + progress, 0);
1310             if (ret < 0) {
1311                 goto err;
1312             }
1313         }
1314 
1315         align_offset += pnum;
1316         align_bytes -= pnum;
1317         progress += pnum - skip_bytes;
1318         skip_bytes = 0;
1319     }
1320     ret = 0;
1321 
1322 err:
1323     qemu_vfree(bounce_buffer);
1324     return ret;
1325 }
1326 
1327 /*
1328  * Forwards an already correctly aligned request to the BlockDriver. This
1329  * handles copy on read, zeroing after EOF, and fragmentation of large
1330  * reads; any other features must be implemented by the caller.
1331  */
1332 static int coroutine_fn GRAPH_RDLOCK
1333 bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
1334                     int64_t offset, int64_t bytes, int64_t align,
1335                     QEMUIOVector *qiov, size_t qiov_offset, int flags)
1336 {
1337     BlockDriverState *bs = child->bs;
1338     int64_t total_bytes, max_bytes;
1339     int ret = 0;
1340     int64_t bytes_remaining = bytes;
1341     int max_transfer;
1342 
1343     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1344     assert(is_power_of_2(align));
1345     assert((offset & (align - 1)) == 0);
1346     assert((bytes & (align - 1)) == 0);
1347     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1348     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1349                                    align);
1350 
1351     /*
1352      * TODO: We would need a per-BDS .supported_read_flags and
1353      * potential fallback support, if we ever implement any read flags
1354      * to pass through to drivers.  For now, there aren't any
1355      * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1356      */
1357     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
1358                        BDRV_REQ_REGISTERED_BUF)));
1359 
1360     /* Handle Copy on Read and associated serialisation */
1361     if (flags & BDRV_REQ_COPY_ON_READ) {
1362         /* If we touch the same cluster it counts as an overlap.  This
1363          * guarantees that allocating writes will be serialized and not race
1364          * with each other for the same cluster.  For example, in copy-on-read
1365          * it ensures that the CoR read and write operations are atomic and
1366          * guest writes cannot interleave between them. */
1367         bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
1368     } else {
1369         bdrv_wait_serialising_requests(req);
1370     }
1371 
1372     if (flags & BDRV_REQ_COPY_ON_READ) {
1373         int64_t pnum;
1374 
1375         /* The BDRV_REQ_COPY_ON_READ flag is handled here; do not forward it */
1376         flags &= ~BDRV_REQ_COPY_ON_READ;
1377 
1378         ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
1379         if (ret < 0) {
1380             goto out;
1381         }
1382 
1383         if (!ret || pnum != bytes) {
1384             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
1385                                            qiov, qiov_offset, flags);
1386             goto out;
1387         } else if (flags & BDRV_REQ_PREFETCH) {
1388             goto out;
1389         }
1390     }
1391 
1392     /* Forward the request to the BlockDriver, possibly fragmenting it */
1393     total_bytes = bdrv_co_getlength(bs);
1394     if (total_bytes < 0) {
1395         ret = total_bytes;
1396         goto out;
1397     }
1398 
1399     assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
1400 
1401     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1402     if (bytes <= max_bytes && bytes <= max_transfer) {
1403         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
1404         goto out;
1405     }
1406 
1407     while (bytes_remaining) {
1408         int64_t num;
1409 
1410         if (max_bytes) {
1411             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1412             assert(num);
1413 
1414             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1415                                      num, qiov,
1416                                      qiov_offset + bytes - bytes_remaining,
1417                                      flags);
1418             max_bytes -= num;
1419         } else {
1420             num = bytes_remaining;
1421             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1422                                     0, bytes_remaining);
1423         }
1424         if (ret < 0) {
1425             goto out;
1426         }
1427         bytes_remaining -= num;
1428     }
1429 
1430 out:
1431     return ret < 0 ? ret : 0;
1432 }
1433 
1434 /*
1435  * Request padding
1436  *
1437  *  |<---- align ----->|                     |<----- align ---->|
1438  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
1439  *  |          |       |                     |     |            |
1440  * -*----------$-------*-------- ... --------*-----$------------*---
1441  *  |          |       |                     |     |            |
1442  *  |          offset  |                     |     end          |
1443  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
1444  *  [buf   ... )                             [tail_buf          )
1445  *
1446  * @buf is an aligned allocation needed to store @head and @tail paddings. @head
1447  * is placed at the beginning of @buf and @tail at the end.
1448  *
1449  * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
1450  * chunk around the tail, if a tail exists.
1451  *
1452  * @merge_reads is true for small requests, i.e. when
1453  * @buf_len == @head + bytes + @tail. In this case it is possible that both
1454  * head and tail exist, but @buf_len == align and @tail_buf == @buf.
1455  *
1456  * @write is true for write requests, false for read requests.
1457  *
1458  * If padding makes the vector too long (exceeding IOV_MAX), then we need to
1459  * merge existing vector elements into a single one.  @collapse_bounce_buf acts
1460  * as the bounce buffer in such cases.  @pre_collapse_qiov has the pre-collapse
1461  * I/O vector elements so for read requests, the data can be copied back after
1462  * the read is done.
1463  */
1464 typedef struct BdrvRequestPadding {
1465     uint8_t *buf;
1466     size_t buf_len;
1467     uint8_t *tail_buf;
1468     size_t head;
1469     size_t tail;
1470     bool merge_reads;
1471     bool write;
1472     QEMUIOVector local_qiov;
1473 
1474     uint8_t *collapse_bounce_buf;
1475     size_t collapse_len;
1476     QEMUIOVector pre_collapse_qiov;
1477 } BdrvRequestPadding;
1478 
1479 static bool bdrv_init_padding(BlockDriverState *bs,
1480                               int64_t offset, int64_t bytes,
1481                               bool write,
1482                               BdrvRequestPadding *pad)
1483 {
1484     int64_t align = bs->bl.request_alignment;
1485     int64_t sum;
1486 
1487     bdrv_check_request(offset, bytes, &error_abort);
1488     assert(align <= INT_MAX); /* documented in block/block_int.h */
1489     assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */
1490 
1491     memset(pad, 0, sizeof(*pad));
1492 
1493     pad->head = offset & (align - 1);
1494     pad->tail = ((offset + bytes) & (align - 1));
1495     if (pad->tail) {
1496         pad->tail = align - pad->tail;
1497     }
1498 
1499     if (!pad->head && !pad->tail) {
1500         return false;
1501     }
1502 
1503     assert(bytes); /* Nothing good in aligning zero-length requests */
1504 
1505     sum = pad->head + bytes + pad->tail;
1506     pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
1507     pad->buf = qemu_blockalign(bs, pad->buf_len);
1508     pad->merge_reads = sum == pad->buf_len;
1509     if (pad->tail) {
1510         pad->tail_buf = pad->buf + pad->buf_len - align;
1511     }
1512 
1513     pad->write = write;
1514 
1515     return true;
1516 }
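
/*
 * Worked examples for the padding computed above, with align = 512:
 *
 * offset = 1000, bytes = 2000: head = 488, tail = 72; sum = 2560 exceeds
 * align with both head and tail present, so buf_len = 1024 (two align-sized
 * chunks) and merge_reads is false.
 *
 * offset = 100, bytes = 200: head = 100, tail = 212; sum = 512 == align, so
 * buf_len = 512, tail_buf == buf and merge_reads is true: a single aligned
 * read covers both head and tail.
 */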
1517 
1518 static int coroutine_fn GRAPH_RDLOCK
1519 bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
1520                       BdrvRequestPadding *pad, bool zero_middle)
1521 {
1522     QEMUIOVector local_qiov;
1523     BlockDriverState *bs = child->bs;
1524     uint64_t align = bs->bl.request_alignment;
1525     int ret;
1526 
1527     assert(req->serialising && pad->buf);
1528 
1529     if (pad->head || pad->merge_reads) {
1530         int64_t bytes = pad->merge_reads ? pad->buf_len : align;
1531 
1532         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
1533 
1534         if (pad->head) {
1535             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1536         }
1537         if (pad->merge_reads && pad->tail) {
1538             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1539         }
1540         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
1541                                   align, &local_qiov, 0, 0);
1542         if (ret < 0) {
1543             return ret;
1544         }
1545         if (pad->head) {
1546             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1547         }
1548         if (pad->merge_reads && pad->tail) {
1549             bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1550         }
1551 
1552         if (pad->merge_reads) {
1553             goto zero_mem;
1554         }
1555     }
1556 
1557     if (pad->tail) {
1558         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
1559 
1560         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1561         ret = bdrv_aligned_preadv(
1562                 child, req,
1563                 req->overlap_offset + req->overlap_bytes - align,
1564                 align, align, &local_qiov, 0, 0);
1565         if (ret < 0) {
1566             return ret;
1567         }
1568         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1569     }
1570 
1571 zero_mem:
1572     if (zero_middle) {
1573         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
1574     }
1575 
1576     return 0;
1577 }
1578 
1579 /**
1580  * Free *pad's associated buffers, and perform any necessary finalization steps.
1581  */
1582 static void bdrv_padding_finalize(BdrvRequestPadding *pad)
1583 {
1584     if (pad->collapse_bounce_buf) {
1585         if (!pad->write) {
1586             /*
1587              * If padding required elements in the vector to be collapsed into a
1588              * bounce buffer, copy the bounce buffer content back
1589              */
1590             qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
1591                                 pad->collapse_bounce_buf, pad->collapse_len);
1592         }
1593         qemu_vfree(pad->collapse_bounce_buf);
1594         qemu_iovec_destroy(&pad->pre_collapse_qiov);
1595     }
1596     if (pad->buf) {
1597         qemu_vfree(pad->buf);
1598         qemu_iovec_destroy(&pad->local_qiov);
1599     }
1600     memset(pad, 0, sizeof(*pad));
1601 }
1602 
1603 /*
1604  * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
1605  * ensuring that the resulting vector will not exceed IOV_MAX elements.
1606  *
1607  * To ensure this, when necessary, the first two or three elements of @iov are
1608  * merged into pad->collapse_bounce_buf and replaced by a reference to that
1609  * bounce buffer in pad->local_qiov.
1610  *
1611  * After performing a read request, the data from the bounce buffer must be
1612  * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
1613  */
1614 static int bdrv_create_padded_qiov(BlockDriverState *bs,
1615                                    BdrvRequestPadding *pad,
1616                                    struct iovec *iov, int niov,
1617                                    size_t iov_offset, size_t bytes)
1618 {
1619     int padded_niov, surplus_count, collapse_count;
1620 
1621     /* The input vector itself must respect the IOV_MAX limit */
1622     assert(niov <= IOV_MAX);
1623 
1624     /*
1625      * Cannot pad if resulting length would exceed SIZE_MAX.  Returning an error
1626      * to the guest is not ideal, but there is little else we can do.  At least
1627      * this will practically never happen on 64-bit systems.
1628      */
1629     if (SIZE_MAX - pad->head < bytes ||
1630         SIZE_MAX - pad->head - bytes < pad->tail)
1631     {
1632         return -EINVAL;
1633     }
1634 
1635     /* Length of the resulting IOV if we just concatenated everything */
1636     padded_niov = !!pad->head + niov + !!pad->tail;
1637 
1638     qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));
1639 
1640     if (pad->head) {
1641         qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
1642     }
1643 
1644     /*
1645      * If padded_niov > IOV_MAX, we cannot just concatenate everything.
1646      * Instead, merge the first two or three elements of @iov to reduce the
1647      * number of vector elements as necessary.
1648      */
1649     if (padded_niov > IOV_MAX) {
1650         /*
1651          * Only head and tail can have led to the number of entries exceeding
1652          * IOV_MAX, so we can exceed it by the head and tail at most.  We need
1653          * to reduce the number of elements by `surplus_count`, so we merge that
1654          * many elements plus one into one element.
1655          */
1656         surplus_count = padded_niov - IOV_MAX;
1657         assert(surplus_count <= !!pad->head + !!pad->tail);
1658         collapse_count = surplus_count + 1;
1659 
1660         /*
1661          * Move the elements to collapse into `pad->pre_collapse_qiov`, then
1662          * advance `iov` (and associated variables) by those elements.
1663          */
1664         qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
1665         qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
1666                               collapse_count, iov_offset, SIZE_MAX);
1667         iov += collapse_count;
1668         iov_offset = 0;
1669         niov -= collapse_count;
1670         bytes -= pad->pre_collapse_qiov.size;
1671 
1672         /*
1673          * Construct the bounce buffer to match the length of the to-collapse
1674          * vector elements, and for write requests, initialize it with the data
1675          * from those elements.  Then add it to `pad->local_qiov`.
1676          */
1677         pad->collapse_len = pad->pre_collapse_qiov.size;
1678         pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
1679         if (pad->write) {
1680             qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
1681                               pad->collapse_bounce_buf, pad->collapse_len);
1682         }
1683         qemu_iovec_add(&pad->local_qiov,
1684                        pad->collapse_bounce_buf, pad->collapse_len);
1685     }
1686 
1687     qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);
1688 
1689     if (pad->tail) {
1690         qemu_iovec_add(&pad->local_qiov,
1691                        pad->buf + pad->buf_len - pad->tail, pad->tail);
1692     }
1693 
1694     assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
1695     return 0;
1696 }
1697 
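/*
 * Worked example (editorial note, not from the original source): with
 * niov == IOV_MAX and both a head and a tail to add, padded_niov is
 * IOV_MAX + 2, so surplus_count == 2 and collapse_count == 3.  Merging
 * the first three guest elements into one bounce buffer leaves
 * 1 (head) + 1 (bounce) + (IOV_MAX - 3) + 1 (tail) == IOV_MAX entries
 * in pad->local_qiov, which is exactly what the final assertion checks.
 */
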
1698 /*
1699  * bdrv_pad_request
1700  *
1701  * Exchange request parameters with the padded request if needed. This does not
1702  * include the RMW read of the padding; bdrv_padding_rmw_read() should be called
1703  * separately if needed.
1704  *
1705  * @write is true for write requests, false for read requests.
1706  *
1707  * Request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
1708  *  - on function start they represent the original request
1709  *  - on failure or when padding is not needed they are unchanged
1710  *  - on success when padding is needed they represent the padded request
1711  */
1712 static int bdrv_pad_request(BlockDriverState *bs,
1713                             QEMUIOVector **qiov, size_t *qiov_offset,
1714                             int64_t *offset, int64_t *bytes,
1715                             bool write,
1716                             BdrvRequestPadding *pad, bool *padded,
1717                             BdrvRequestFlags *flags)
1718 {
1719     int ret;
1720     struct iovec *sliced_iov;
1721     int sliced_niov;
1722     size_t sliced_head, sliced_tail;
1723 
1724     /* Should have been checked by the caller already */
1725     ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
1726     if (ret < 0) {
1727         return ret;
1728     }
1729 
1730     if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
1731         if (padded) {
1732             *padded = false;
1733         }
1734         return 0;
1735     }
1736 
1737     /*
1738      * For prefetching in stream_populate(), no qiov is passed along, because
1739      * only copy-on-read matters.
1740      */
1741     if (*qiov) {
1742         sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
1743                                       &sliced_head, &sliced_tail,
1744                                       &sliced_niov);
1745 
1746         /* Guaranteed by bdrv_check_request32() */
1747         assert(*bytes <= SIZE_MAX);
1748         ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
1749                                       sliced_head, *bytes);
1750         if (ret < 0) {
1751             bdrv_padding_finalize(pad);
1752             return ret;
1753         }
1754         *qiov = &pad->local_qiov;
1755         *qiov_offset = 0;
1756     }
1757 
1758     *bytes += pad->head + pad->tail;
1759     *offset -= pad->head;
1760     if (padded) {
1761         *padded = true;
1762     }
1763     if (flags) {
1764         /* Can't use optimization hint with bounce buffer */
1765         *flags &= ~BDRV_REQ_REGISTERED_BUF;
1766     }
1767 
1768     return 0;
1769 }
1770 
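/*
 * Usage note (editorial): after bdrv_pad_request() succeeds with padding
 * applied, the caller must eventually call bdrv_padding_finalize(); write
 * paths must additionally perform the read-modify-write read through
 * bdrv_padding_rmw_read() under a serialising tracked request.  Both
 * patterns can be seen in bdrv_co_preadv_part() and bdrv_co_pwritev_part()
 * below.
 */
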
1771 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1772     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1773     BdrvRequestFlags flags)
1774 {
1775     IO_CODE();
1776     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1777 }
1778 
1779 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1780     int64_t offset, int64_t bytes,
1781     QEMUIOVector *qiov, size_t qiov_offset,
1782     BdrvRequestFlags flags)
1783 {
1784     BlockDriverState *bs = child->bs;
1785     BdrvTrackedRequest req;
1786     BdrvRequestPadding pad;
1787     int ret;
1788     IO_CODE();
1789 
1790     trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
1791 
1792     if (!bdrv_co_is_inserted(bs)) {
1793         return -ENOMEDIUM;
1794     }
1795 
1796     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1797     if (ret < 0) {
1798         return ret;
1799     }
1800 
1801     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1802         /*
1803          * Aligning a zero-length request is nonsense. Even if the driver has a
1804          * special meaning for zero length (like qcow2_co_pwritev_compressed_part),
1805          * we can't pass the request to the driver due to request_alignment.
1806          *
1807          * Still, there is no reason to return an error if someone does an
1808          * unaligned zero-length read occasionally.
1809          */
1810         return 0;
1811     }
1812 
1813     bdrv_inc_in_flight(bs);
1814 
1815     /* Don't do copy-on-read if we read data before write operation */
1816     if (qatomic_read(&bs->copy_on_read)) {
1817         flags |= BDRV_REQ_COPY_ON_READ;
1818     }
1819 
1820     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
1821                            &pad, NULL, &flags);
1822     if (ret < 0) {
1823         goto fail;
1824     }
1825 
1826     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1827     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1828                               bs->bl.request_alignment,
1829                               qiov, qiov_offset, flags);
1830     tracked_request_end(&req);
1831     bdrv_padding_finalize(&pad);
1832 
1833 fail:
1834     bdrv_dec_in_flight(bs);
1835 
1836     return ret;
1837 }
1838 
1839 static int coroutine_fn GRAPH_RDLOCK
1840 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1841                          BdrvRequestFlags flags)
1842 {
1843     BlockDriver *drv = bs->drv;
1844     QEMUIOVector qiov;
1845     void *buf = NULL;
1846     int ret = 0;
1847     bool need_flush = false;
1848     int head = 0;
1849     int tail = 0;
1850 
1851     int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
1852                                             INT64_MAX);
1853     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1854                         bs->bl.request_alignment);
1855     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1856 
1857     assert_bdrv_graph_readable();
1858     bdrv_check_request(offset, bytes, &error_abort);
1859 
1860     if (!drv) {
1861         return -ENOMEDIUM;
1862     }
1863 
1864     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1865         return -ENOTSUP;
1866     }
1867 
1868     /* By definition there is no user buffer so this flag doesn't make sense */
1869     if (flags & BDRV_REQ_REGISTERED_BUF) {
1870         return -EINVAL;
1871     }
1872 
1873     /* If opened with discard=off we should never unmap. */
1874     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1875         flags &= ~BDRV_REQ_MAY_UNMAP;
1876     }
1877 
1878     /* Invalidate the cached block-status data range if this write overlaps */
1879     bdrv_bsc_invalidate_range(bs, offset, bytes);
1880 
1881     assert(alignment % bs->bl.request_alignment == 0);
1882     head = offset % alignment;
1883     tail = (offset + bytes) % alignment;
1884     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1885     assert(max_write_zeroes >= bs->bl.request_alignment);
1886 
1887     while (bytes > 0 && !ret) {
1888         int64_t num = bytes;
1889 
1890         /* Align request.  Block drivers can expect the "bulk" of the request
1891          * to be aligned, and that unaligned requests do not cross cluster
1892          * boundaries.
1893          */
1894         if (head) {
1895             /* Make a small request up to the first aligned sector. For
1896              * convenience, limit this request to max_transfer even if
1897              * we don't need to fall back to writes.  */
1898             num = MIN(MIN(bytes, max_transfer), alignment - head);
1899             head = (head + num) % alignment;
1900             assert(num < max_write_zeroes);
1901         } else if (tail && num > alignment) {
1902             /* Shorten the request to the last aligned sector.  */
1903             num -= tail;
1904         }
1905 
1906         /* limit request size */
1907         if (num > max_write_zeroes) {
1908             num = max_write_zeroes;
1909         }
1910 
1911         ret = -ENOTSUP;
1912         /* First try the efficient write zeroes operation */
1913         if (drv->bdrv_co_pwrite_zeroes) {
1914             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1915                                              flags & bs->supported_zero_flags);
1916             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1917                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1918                 need_flush = true;
1919             }
1920         } else {
1921             assert(!bs->supported_zero_flags);
1922         }
1923 
1924         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1925             /* Fall back to bounce buffer if write zeroes is unsupported */
1926             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1927 
1928             if ((flags & BDRV_REQ_FUA) &&
1929                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1930                 /* No need for bdrv_driver_pwrite() to do a fallback
1931                  * flush on each chunk; use just one at the end */
1932                 write_flags &= ~BDRV_REQ_FUA;
1933                 need_flush = true;
1934             }
1935             num = MIN(num, max_transfer);
1936             if (buf == NULL) {
1937                 buf = qemu_try_blockalign0(bs, num);
1938                 if (buf == NULL) {
1939                     ret = -ENOMEM;
1940                     goto fail;
1941                 }
1942             }
1943             qemu_iovec_init_buf(&qiov, buf, num);
1944 
1945             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1946 
1947             /* Keep the bounce buffer around if it is big enough for
1948              * all future requests.
1949              */
1950             if (num < max_transfer) {
1951                 qemu_vfree(buf);
1952                 buf = NULL;
1953             }
1954         }
1955 
1956         offset += num;
1957         bytes -= num;
1958     }
1959 
1960 fail:
1961     if (ret == 0 && need_flush) {
1962         ret = bdrv_co_flush(bs);
1963     }
1964     qemu_vfree(buf);
1965     return ret;
1966 }
1967 
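/*
 * Worked example (editorial note): with alignment == 4096, offset == 1536
 * and bytes == 65536, head == tail == 1536, so the loop above issues three
 * requests: an unaligned head of 2560 bytes up to offset 4096, an aligned
 * middle of 61440 bytes, and an unaligned tail of 1536 bytes.
 */
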
1968 static inline int coroutine_fn GRAPH_RDLOCK
1969 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1970                           BdrvTrackedRequest *req, int flags)
1971 {
1972     BlockDriverState *bs = child->bs;
1973 
1974     bdrv_check_request(offset, bytes, &error_abort);
1975 
1976     if (bdrv_is_read_only(bs)) {
1977         return -EPERM;
1978     }
1979 
1980     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1981     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1982     assert(!(flags & ~BDRV_REQ_MASK));
1983     assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1984 
1985     if (flags & BDRV_REQ_SERIALISING) {
1986         QEMU_LOCK_GUARD(&bs->reqs_lock);
1987 
1988         tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1989 
1990         if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1991             return -EBUSY;
1992         }
1993 
1994         bdrv_wait_serialising_requests_locked(req);
1995     } else {
1996         bdrv_wait_serialising_requests(req);
1997     }
1998 
1999     assert(req->overlap_offset <= offset);
2000     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
2001     assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
2002            child->perm & BLK_PERM_RESIZE);
2003 
2004     switch (req->type) {
2005     case BDRV_TRACKED_WRITE:
2006     case BDRV_TRACKED_DISCARD:
2007         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
2008             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
2009         } else {
2010             assert(child->perm & BLK_PERM_WRITE);
2011         }
2012         bdrv_write_threshold_check_write(bs, offset, bytes);
2013         return 0;
2014     case BDRV_TRACKED_TRUNCATE:
2015         assert(child->perm & BLK_PERM_RESIZE);
2016         return 0;
2017     default:
2018         abort();
2019     }
2020 }
2021 
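/*
 * Example (editorial note): a caller passing both BDRV_REQ_SERIALISING and
 * BDRV_REQ_NO_WAIT receives -EBUSY from the function above instead of
 * blocking when a conflicting request is already in flight; with
 * BDRV_REQ_SERIALISING alone, the coroutine waits for the conflict to
 * drain.
 */
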
2022 static inline void coroutine_fn GRAPH_RDLOCK
2023 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
2024                          BdrvTrackedRequest *req, int ret)
2025 {
2026     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
2027     BlockDriverState *bs = child->bs;
2028 
2029     bdrv_check_request(offset, bytes, &error_abort);
2030 
2031     qatomic_inc(&bs->write_gen);
2032 
2033     /*
2034      * Discard cannot extend the image, but in error handling cases, such as
2035      * when reverting a qcow2 cluster allocation, the discarded range can
2036      * extend past the end of the image file, so we cannot assert about
2037      * BDRV_TRACKED_DISCARD here. Instead, just skip it, since a discard request
2038      * beyond EOF cannot expand the image anyway.
2039      */
2040     if (ret == 0 &&
2041         (req->type == BDRV_TRACKED_TRUNCATE ||
2042          end_sector > bs->total_sectors) &&
2043         req->type != BDRV_TRACKED_DISCARD) {
2044         bs->total_sectors = end_sector;
2045         bdrv_parent_cb_resize(bs);
2046         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
2047     }
2048     if (req->bytes) {
2049         switch (req->type) {
2050         case BDRV_TRACKED_WRITE:
2051             stat64_max(&bs->wr_highest_offset, offset + bytes);
2052             /* fall through, to set dirty bits */
2053         case BDRV_TRACKED_DISCARD:
2054             bdrv_set_dirty(bs, offset, bytes);
2055             break;
2056         default:
2057             break;
2058         }
2059     }
2060 }
2061 
2062 /*
2063  * Forwards an already correctly aligned write request to the BlockDriver,
2064  * after possibly fragmenting it.
2065  */
2066 static int coroutine_fn GRAPH_RDLOCK
2067 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
2068                      int64_t offset, int64_t bytes, int64_t align,
2069                      QEMUIOVector *qiov, size_t qiov_offset,
2070                      BdrvRequestFlags flags)
2071 {
2072     BlockDriverState *bs = child->bs;
2073     BlockDriver *drv = bs->drv;
2074     int ret;
2075 
2076     int64_t bytes_remaining = bytes;
2077     int max_transfer;
2078 
2079     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2080 
2081     if (!drv) {
2082         return -ENOMEDIUM;
2083     }
2084 
2085     if (bdrv_has_readonly_bitmaps(bs)) {
2086         return -EPERM;
2087     }
2088 
2089     assert(is_power_of_2(align));
2090     assert((offset & (align - 1)) == 0);
2091     assert((bytes & (align - 1)) == 0);
2092     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2093                                    align);
2094 
2095     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2096 
2097     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2098         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
2099         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2100         flags |= BDRV_REQ_ZERO_WRITE;
2101         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2102             flags |= BDRV_REQ_MAY_UNMAP;
2103         }
2104 
2105         /* Can't use optimization hint with bufferless zero write */
2106         flags &= ~BDRV_REQ_REGISTERED_BUF;
2107     }
2108 
2109     if (ret < 0) {
2110         /* Do nothing; the write notifier decided to fail this request */
2111     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2112         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
2113         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
2114     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2115         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2116                                              qiov, qiov_offset);
2117     } else if (bytes <= max_transfer) {
2118         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
2119         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2120     } else {
2121         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
2122         while (bytes_remaining) {
2123             int num = MIN(bytes_remaining, max_transfer);
2124             int local_flags = flags;
2125 
2126             assert(num);
2127             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2128                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2129                 /* If FUA is going to be emulated by flush, we only
2130                  * need to flush on the last iteration */
2131                 local_flags &= ~BDRV_REQ_FUA;
2132             }
2133 
2134             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2135                                       num, qiov,
2136                                       qiov_offset + bytes - bytes_remaining,
2137                                       local_flags);
2138             if (ret < 0) {
2139                 break;
2140             }
2141             bytes_remaining -= num;
2142         }
2143     }
2144     bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
2145 
2146     if (ret >= 0) {
2147         ret = 0;
2148     }
2149     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2150 
2151     return ret;
2152 }
2153 
2154 static int coroutine_fn GRAPH_RDLOCK
2155 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
2156                         BdrvRequestFlags flags, BdrvTrackedRequest *req)
2157 {
2158     BlockDriverState *bs = child->bs;
2159     QEMUIOVector local_qiov;
2160     uint64_t align = bs->bl.request_alignment;
2161     int ret = 0;
2162     bool padding;
2163     BdrvRequestPadding pad;
2164 
2165     /* This flag doesn't make sense for padding or zero writes */
2166     flags &= ~BDRV_REQ_REGISTERED_BUF;
2167 
2168     padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
2169     if (padding) {
2170         assert(!(flags & BDRV_REQ_NO_WAIT));
2171         bdrv_make_request_serialising(req, align);
2172 
2173         bdrv_padding_rmw_read(child, req, &pad, true);
2174 
2175         if (pad.head || pad.merge_reads) {
2176             int64_t aligned_offset = offset & ~(align - 1);
2177             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2178 
2179             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2180             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2181                                        align, &local_qiov, 0,
2182                                        flags & ~BDRV_REQ_ZERO_WRITE);
2183             if (ret < 0 || pad.merge_reads) {
2184                 /* Error or all work is done */
2185                 goto out;
2186             }
2187             offset += write_bytes - pad.head;
2188             bytes -= write_bytes - pad.head;
2189         }
2190     }
2191 
2192     assert(!bytes || (offset & (align - 1)) == 0);
2193     if (bytes >= align) {
2194         /* Write the aligned part in the middle. */
2195         int64_t aligned_bytes = bytes & ~(align - 1);
2196         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2197                                    NULL, 0, flags);
2198         if (ret < 0) {
2199             goto out;
2200         }
2201         bytes -= aligned_bytes;
2202         offset += aligned_bytes;
2203     }
2204 
2205     assert(!bytes || (offset & (align - 1)) == 0);
2206     if (bytes) {
2207         assert(align == pad.tail + bytes);
2208 
2209         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2210         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2211                                    &local_qiov, 0,
2212                                    flags & ~BDRV_REQ_ZERO_WRITE);
2213     }
2214 
2215 out:
2216     bdrv_padding_finalize(&pad);
2217 
2218     return ret;
2219 }
2220 
2221 /*
2222  * Handle a write request in coroutine context
2223  */
2224 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2225     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2226     BdrvRequestFlags flags)
2227 {
2228     IO_CODE();
2229     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2230 }
2231 
2232 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2233     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2234     BdrvRequestFlags flags)
2235 {
2236     BlockDriverState *bs = child->bs;
2237     BdrvTrackedRequest req;
2238     uint64_t align = bs->bl.request_alignment;
2239     BdrvRequestPadding pad;
2240     int ret;
2241     bool padded = false;
2242     IO_CODE();
2243 
2244     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2245 
2246     if (!bdrv_co_is_inserted(bs)) {
2247         return -ENOMEDIUM;
2248     }
2249 
2250     if (flags & BDRV_REQ_ZERO_WRITE) {
2251         ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2252     } else {
2253         ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2254     }
2255     if (ret < 0) {
2256         return ret;
2257     }
2258 
2259     /* If the request is misaligned then we can't make it efficient */
2260     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2261         !QEMU_IS_ALIGNED(offset | bytes, align))
2262     {
2263         return -ENOTSUP;
2264     }
2265 
2266     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2267         /*
2268          * Aligning a zero-length request is nonsense. Even if the driver has a
2269          * special meaning for zero length (like qcow2_co_pwritev_compressed_part),
2270          * we can't pass the request to the driver due to request_alignment.
2271          *
2272          * Still, there is no reason to return an error if someone does an
2273          * unaligned zero-length write occasionally.
2274          */
2275         return 0;
2276     }
2277 
2278     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2279         /*
2280          * Pad the request for the following read-modify-write cycle.
2281          * bdrv_co_do_zero_pwritev() does the aligning itself, so we do the
2282          * alignment only if there is no ZERO flag.
2283          */
2284         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
2285                                &pad, &padded, &flags);
2286         if (ret < 0) {
2287             return ret;
2288         }
2289     }
2290 
2291     bdrv_inc_in_flight(bs);
2292     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2293 
2294     if (flags & BDRV_REQ_ZERO_WRITE) {
2295         assert(!padded);
2296         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2297         goto out;
2298     }
2299 
2300     if (padded) {
2301         /*
2302          * Request was unaligned to request_alignment and therefore
2303          * padded.  We are going to do read-modify-write, and must
2304          * serialize the request to prevent interactions of the
2305          * widened region with other transactions.
2306          */
2307         assert(!(flags & BDRV_REQ_NO_WAIT));
2308         bdrv_make_request_serialising(&req, align);
2309         bdrv_padding_rmw_read(child, &req, &pad, false);
2310     }
2311 
2312     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2313                                qiov, qiov_offset, flags);
2314 
2315     bdrv_padding_finalize(&pad);
2316 
2317 out:
2318     tracked_request_end(&req);
2319     bdrv_dec_in_flight(bs);
2320 
2321     return ret;
2322 }
2323 
2324 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2325                                        int64_t bytes, BdrvRequestFlags flags)
2326 {
2327     IO_CODE();
2328     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2329     assert_bdrv_graph_readable();
2330 
2331     return bdrv_co_pwritev(child, offset, bytes, NULL,
2332                            BDRV_REQ_ZERO_WRITE | flags);
2333 }
2334 
2335 /*
2336  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
2337  */
2338 int bdrv_flush_all(void)
2339 {
2340     BdrvNextIterator it;
2341     BlockDriverState *bs = NULL;
2342     int result = 0;
2343 
2344     GLOBAL_STATE_CODE();
2345     GRAPH_RDLOCK_GUARD_MAINLOOP();
2346 
2347     /*
2348      * The bdrv queue is managed by record/replay;
2349      * creating a new flush request while stopping
2350      * the VM may break determinism.
2351      */
2352     if (replay_events_enabled()) {
2353         return result;
2354     }
2355 
2356     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2357         int ret = bdrv_flush(bs);
2358         if (ret < 0 && !result) {
2359             result = ret;
2360         }
2361     }
2362 
2363     return result;
2364 }
2365 
2366 /*
2367  * Returns the allocation status of the specified sectors.
2368  * Drivers not implementing the functionality are assumed to not support
2369  * backing files, hence all their sectors are reported as allocated.
2370  *
2371  * 'mode' serves as a hint as to which results are favored; see the
2372  * BDRV_WANT_* macros for details.
2373  *
2374  * If 'offset' is beyond the end of the disk image the return value is
2375  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2376  *
2377  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2378  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2379  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2380  *
2381  * 'pnum' is set to the number of bytes (including and immediately
2382  * following the specified offset) that are easily known to be in the
2383  * same allocated/unallocated state.  Note that a second call starting
2384  * at the original offset plus returned pnum may have the same status.
2385  * The returned value is non-zero on success except at end-of-file.
2386  *
2387  * Returns negative errno on failure.  Otherwise, if the
2388  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2389  * set to the host mapping and BDS corresponding to the guest offset.
2390  */
2391 static int coroutine_fn GRAPH_RDLOCK
2392 bdrv_co_do_block_status(BlockDriverState *bs, unsigned int mode,
2393                         int64_t offset, int64_t bytes,
2394                         int64_t *pnum, int64_t *map, BlockDriverState **file)
2395 {
2396     int64_t total_size;
2397     int64_t n; /* bytes */
2398     int ret;
2399     int64_t local_map = 0;
2400     BlockDriverState *local_file = NULL;
2401     int64_t aligned_offset, aligned_bytes;
2402     uint32_t align;
2403     bool has_filtered_child;
2404 
2405     assert(pnum);
2406     assert_bdrv_graph_readable();
2407     *pnum = 0;
2408     total_size = bdrv_co_getlength(bs);
2409     if (total_size < 0) {
2410         ret = total_size;
2411         goto early_out;
2412     }
2413 
2414     if (offset >= total_size) {
2415         ret = BDRV_BLOCK_EOF;
2416         goto early_out;
2417     }
2418     if (!bytes) {
2419         ret = 0;
2420         goto early_out;
2421     }
2422 
2423     n = total_size - offset;
2424     if (n < bytes) {
2425         bytes = n;
2426     }
2427 
2428     /* Must be non-NULL or bdrv_co_getlength() would have failed */
2429     assert(bs->drv);
2430     has_filtered_child = bdrv_filter_child(bs);
2431     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2432         *pnum = bytes;
2433         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2434         if (offset + bytes == total_size) {
2435             ret |= BDRV_BLOCK_EOF;
2436         }
2437         if (bs->drv->protocol_name) {
2438             ret |= BDRV_BLOCK_OFFSET_VALID;
2439             local_map = offset;
2440             local_file = bs;
2441         }
2442         goto early_out;
2443     }
2444 
2445     bdrv_inc_in_flight(bs);
2446 
2447     /* Round out to request_alignment boundaries */
2448     align = bs->bl.request_alignment;
2449     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2450     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2451 
2452     if (bs->drv->bdrv_co_block_status) {
2453         /*
2454          * Use the block-status cache only for protocol nodes: Format
2455          * drivers are generally quick to query the status, but protocol
2456          * drivers often need to get information from outside of qemu, so
2457          * we do not have control over the actual implementation.  There
2458          * have been cases where querying the status took an unreasonably
2459          * long time, and we can do nothing in qemu to fix it.
2460          * This is especially problematic for images with large data areas,
2461          * because finding the few holes in them and giving them special
2462          * treatment does not gain much performance.  Therefore, we try to
2463          * cache the last-identified data region.
2464          *
2465          * Second, limiting ourselves to protocol nodes allows us to assume
2466          * the block status for data regions to be DATA | OFFSET_VALID, and
2467          * that the host offset is the same as the guest offset.
2468          *
2469          * Note that it is possible that external writers zero parts of
2470          * the cached regions without the cache being invalidated, and so
2471          * we may report zeroes as data.  This is not catastrophic,
2472          * however, because reporting zeroes as data is fine.
2473          */
2474         if (QLIST_EMPTY(&bs->children) &&
2475             bdrv_bsc_is_data(bs, aligned_offset, pnum))
2476         {
2477             ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2478             local_file = bs;
2479             local_map = aligned_offset;
2480         } else {
2481             ret = bs->drv->bdrv_co_block_status(bs, mode, aligned_offset,
2482                                                 aligned_bytes, pnum, &local_map,
2483                                                 &local_file);
2484 
2485             /*
2486              * Note that checking QLIST_EMPTY(&bs->children) is also done when
2487              * the cache is queried above.  Technically, we do not need to check
2488              * it here; the worst that can happen is that we fill the cache for
2489              * non-protocol nodes, and then it is never used.  However, filling
2490              * the cache requires an RCU update, so double check here to avoid
2491              * such an update if possible.
2492              *
2493              * Check mode, because we only want to update the cache when we
2494              * have accurate information about what is zero and what is data.
2495              */
2496             if (mode == BDRV_WANT_PRECISE &&
2497                 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2498                 QLIST_EMPTY(&bs->children))
2499             {
2500                 /*
2501                  * When a protocol driver reports BLOCK_OFFSET_VALID, the
2502                  * returned local_map value must be the same as the offset we
2503                  * have passed (aligned_offset), and local_file must be the node
2504                  * itself.
2505                  * Assert this, because we follow this rule when reading from
2506                  * the cache (see the `local_file = bs` and
2507                  * `local_map = aligned_offset` assignments above), and the
2508                  * result the cache delivers must be the same as the driver
2509                  * would deliver.
2510                  */
2511                 assert(local_file == bs);
2512                 assert(local_map == aligned_offset);
2513                 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2514             }
2515         }
2516     } else {
2517         /* Default code for filters */
2518 
2519         local_file = bdrv_filter_bs(bs);
2520         assert(local_file);
2521 
2522         *pnum = aligned_bytes;
2523         local_map = aligned_offset;
2524         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2525     }
2526     if (ret < 0) {
2527         *pnum = 0;
2528         goto out;
2529     }
2530 
2531     /*
2532      * The driver's result must be a non-zero multiple of request_alignment.
2533      * Clamp pnum and adjust map to original request.
2534      */
2535     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2536            align > offset - aligned_offset);
2537     if (ret & BDRV_BLOCK_RECURSE) {
2538         assert(ret & BDRV_BLOCK_DATA);
2539         assert(ret & BDRV_BLOCK_OFFSET_VALID);
2540         assert(!(ret & BDRV_BLOCK_ZERO));
2541     }
2542 
2543     *pnum -= offset - aligned_offset;
2544     if (*pnum > bytes) {
2545         *pnum = bytes;
2546     }
2547     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2548         local_map += offset - aligned_offset;
2549     }
2550 
2551     if (ret & BDRV_BLOCK_RAW) {
2552         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2553         ret = bdrv_co_do_block_status(local_file, mode, local_map,
2554                                       *pnum, pnum, &local_map, &local_file);
2555         goto out;
2556     }
2557 
2558     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2559         ret |= BDRV_BLOCK_ALLOCATED;
2560     } else if (bs->drv->supports_backing) {
2561         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2562 
2563         if (!cow_bs) {
2564             ret |= BDRV_BLOCK_ZERO;
2565         } else if (mode == BDRV_WANT_PRECISE) {
2566             int64_t size2 = bdrv_co_getlength(cow_bs);
2567 
2568             if (size2 >= 0 && offset >= size2) {
2569                 ret |= BDRV_BLOCK_ZERO;
2570             }
2571         }
2572     }
2573 
2574     if (mode == BDRV_WANT_PRECISE && ret & BDRV_BLOCK_RECURSE &&
2575         local_file && local_file != bs &&
2576         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2577         (ret & BDRV_BLOCK_OFFSET_VALID)) {
2578         int64_t file_pnum;
2579         int ret2;
2580 
2581         ret2 = bdrv_co_do_block_status(local_file, mode, local_map,
2582                                        *pnum, &file_pnum, NULL, NULL);
2583         if (ret2 >= 0) {
2584             /* Ignore errors.  This is just providing extra information; it
2585              * is useful but not necessary.
2586              */
2587             if (ret2 & BDRV_BLOCK_EOF &&
2588                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2589                 /*
2590                  * It is valid for the format block driver to read
2591                  * beyond the end of the underlying file's current
2592                  * size; such areas read as zero.
2593                  */
2594                 ret |= BDRV_BLOCK_ZERO;
2595             } else {
2596                 /* Limit request to the range reported by the protocol driver */
2597                 *pnum = file_pnum;
2598                 ret |= (ret2 & BDRV_BLOCK_ZERO);
2599             }
2600         }
2601 
2602         /*
2603          * Now that the recursive search was done, clear the flag. Otherwise,
2604          * with more complicated block graphs like snapshot-access ->
2605          * copy-before-write -> qcow2, where the return value will be propagated
2606          * further up to a parent bdrv_co_do_block_status() call, both the
2607          * BDRV_BLOCK_RECURSE and BDRV_BLOCK_ZERO flags would be set, which is
2608          * not allowed.
2609          */
2610         ret &= ~BDRV_BLOCK_RECURSE;
2611     }
2612 
2613 out:
2614     bdrv_dec_in_flight(bs);
2615     if (ret >= 0 && offset + *pnum == total_size) {
2616         ret |= BDRV_BLOCK_EOF;
2617     }
2618 early_out:
2619     if (file) {
2620         *file = local_file;
2621     }
2622     if (map) {
2623         *map = local_map;
2624     }
2625     return ret;
2626 }
2627 
2628 int coroutine_fn
2629 bdrv_co_common_block_status_above(BlockDriverState *bs,
2630                                   BlockDriverState *base,
2631                                   bool include_base,
2632                                   unsigned int mode,
2633                                   int64_t offset,
2634                                   int64_t bytes,
2635                                   int64_t *pnum,
2636                                   int64_t *map,
2637                                   BlockDriverState **file,
2638                                   int *depth)
2639 {
2640     int ret;
2641     BlockDriverState *p;
2642     int64_t eof = 0;
2643     int dummy;
2644     IO_CODE();
2645 
2646     assert(!include_base || base); /* Can't include NULL base */
2647     assert_bdrv_graph_readable();
2648 
2649     if (!depth) {
2650         depth = &dummy;
2651     }
2652     *depth = 0;
2653 
2654     if (!include_base && bs == base) {
2655         *pnum = bytes;
2656         return 0;
2657     }
2658 
2659     ret = bdrv_co_do_block_status(bs, mode, offset, bytes, pnum,
2660                                   map, file);
2661     ++*depth;
2662     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2663         return ret;
2664     }
2665 
2666     if (ret & BDRV_BLOCK_EOF) {
2667         eof = offset + *pnum;
2668     }
2669 
2670     assert(*pnum <= bytes);
2671     bytes = *pnum;
2672 
2673     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2674          p = bdrv_filter_or_cow_bs(p))
2675     {
2676         ret = bdrv_co_do_block_status(p, mode, offset, bytes, pnum,
2677                                       map, file);
2678         ++*depth;
2679         if (ret < 0) {
2680             return ret;
2681         }
2682         if (*pnum == 0) {
2683             /*
2684              * The top layer deferred to this layer, and because this layer is
2685              * short, any zeroes that we synthesize beyond EOF behave as if they
2686              * were allocated at this layer.
2687              *
2688              * We don't include BDRV_BLOCK_EOF in ret, as the upper layer may be
2689              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2690              * below.
2691              */
2692             assert(ret & BDRV_BLOCK_EOF);
2693             *pnum = bytes;
2694             if (file) {
2695                 *file = p;
2696             }
2697             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2698             break;
2699         }
2700         if (ret & BDRV_BLOCK_ALLOCATED) {
2701             /*
2702              * We've found the node and the status, we must break.
2703              *
2704              * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
2705              * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
2706              * below.
2707              */
2708             ret &= ~BDRV_BLOCK_EOF;
2709             break;
2710         }
2711 
2712         if (p == base) {
2713             assert(include_base);
2714             break;
2715         }
2716 
2717         /*
2718          * OK, the [offset, offset + *pnum) region is unallocated on this layer,
2719          * so continue diving into the backing chain.
2720          */
2721         assert(*pnum <= bytes);
2722         bytes = *pnum;
2723     }
2724 
2725     if (offset + *pnum == eof) {
2726         ret |= BDRV_BLOCK_EOF;
2727     }
2728 
2729     return ret;
2730 }
2731 
2732 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2733                                             BlockDriverState *base,
2734                                             int64_t offset, int64_t bytes,
2735                                             int64_t *pnum, int64_t *map,
2736                                             BlockDriverState **file)
2737 {
2738     IO_CODE();
2739     return bdrv_co_common_block_status_above(bs, base, false,
2740                                              BDRV_WANT_PRECISE, offset,
2741                                              bytes, pnum, map, file, NULL);
2742 }
2743 
2744 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
2745                                       int64_t bytes, int64_t *pnum,
2746                                       int64_t *map, BlockDriverState **file)
2747 {
2748     IO_CODE();
2749     return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2750                                       offset, bytes, pnum, map, file);
2751 }
2752 
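/*
 * Illustration (editorial sketch, not part of the original file): a
 * hypothetical coroutine that walks an image extent by extent.  The
 * example_dump_extents() name is invented; the loop shape mirrors
 * bdrv_co_is_zero_fast() below, and the code assumes it runs in coroutine
 * context with the graph rdlock held, like the other callers in this file.
 */
#if 0
static int coroutine_fn example_dump_extents(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes)
{
    while (bytes > 0) {
        int64_t pnum, map;
        BlockDriverState *file;
        int ret = bdrv_co_block_status(bs, offset, bytes, &pnum,
                                       &map, &file);
        if (ret < 0) {
            return ret; /* negative errno on failure */
        }
        printf("%" PRId64 "+%" PRId64 ":%s%s\n", offset, pnum,
               (ret & BDRV_BLOCK_DATA) ? " data" : "",
               (ret & BDRV_BLOCK_ZERO) ? " zero" : "");
        if (ret & BDRV_BLOCK_EOF) {
            break; /* pnum was clamped at the end of the image */
        }
        offset += pnum;
        bytes -= pnum;
    }
    return 0;
}
#endif
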
2753 /*
2754  * Check @bs (and its backing chain) to see if the range defined
2755  * by @offset and @bytes is known to read as zeroes.
2756  * Return 1 if that is the case, 0 otherwise and -errno on error.
2757  * This test is meant to be fast rather than accurate, so returning 0
2758  * does not guarantee non-zero data; but a return of 1 is reliable.
2759  */
2760 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2761                                       int64_t bytes)
2762 {
2763     int ret;
2764     int64_t pnum;
2765     IO_CODE();
2766 
2767     while (bytes) {
2768         ret = bdrv_co_common_block_status_above(bs, NULL, false,
2769                                                 BDRV_WANT_ZERO, offset, bytes,
2770                                                 &pnum, NULL, NULL, NULL);
2771 
2772         if (ret < 0) {
2773             return ret;
2774         }
2775         if (!(ret & BDRV_BLOCK_ZERO)) {
2776             return 0;
2777         }
2778         offset += pnum;
2779         bytes -= pnum;
2780     }
2781 
2782     return 1;
2783 }
2784 
2785 /*
2786  * Check @bs (and its backing chain) to see if the entire image is known
2787  * to read as zeroes.
2788  * Return 1 if that is the case, 0 otherwise and -errno on error.
2789  * This test is meant to be fast rather than accurate, so returning 0
2790  * does not guarantee non-zero data; however, a return of 1 is reliable,
2791  * and this function can report 1 in more cases than bdrv_co_is_zero_fast.
2792  */
2793 int coroutine_fn bdrv_co_is_all_zeroes(BlockDriverState *bs)
2794 {
2795     int ret;
2796     int64_t pnum, bytes;
2797     char *buf;
2798     QEMUIOVector local_qiov;
2799     IO_CODE();
2800 
2801     bytes = bdrv_co_getlength(bs);
2802     if (bytes < 0) {
2803         return bytes;
2804     }
2805 
2806     /* First probe - see if the entire image reads as zero */
2807     ret = bdrv_co_common_block_status_above(bs, NULL, false, BDRV_WANT_ZERO,
2808                                             0, bytes, &pnum, NULL, NULL,
2809                                             NULL);
2810     if (ret < 0) {
2811         return ret;
2812     }
2813     if (ret & BDRV_BLOCK_ZERO) {
2814         return bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
2815     }
2816 
2817     /*
2818      * Because of the way 'blockdev-create' works, raw files tend to
2819      * be created with a non-sparse region at the front to make
2820      * alignment probing easier.  If the image starts with only a
2821      * small allocated region, it is still worth the effort to see if
2822      * the rest of the image is still sparse, coupled with manually
2823      * reading the first region to see if it reads zero after all.
2824      */
2825     if (pnum > MAX_ZERO_CHECK_BUFFER) {
2826         return 0;
2827     }
2828     ret = bdrv_co_is_zero_fast(bs, pnum, bytes - pnum);
2829     if (ret <= 0) {
2830         return ret;
2831     }
2832     /* Only the head of the image is unknown, and it's small.  Read it.  */
2833     buf = qemu_blockalign(bs, pnum);
2834     qemu_iovec_init_buf(&local_qiov, buf, pnum);
2835     ret = bdrv_driver_preadv(bs, 0, pnum, &local_qiov, 0, 0);
2836     if (ret >= 0) {
2837         ret = buffer_is_zero(buf, pnum);
2838     }
2839     qemu_vfree(buf);
2840     return ret;
2841 }
2842 
2843 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
2844                                       int64_t bytes, int64_t *pnum)
2845 {
2846     int ret;
2847     int64_t dummy;
2848     IO_CODE();
2849 
2850     ret = bdrv_co_common_block_status_above(bs, bs, true, BDRV_WANT_ALLOCATED,
2851                                             offset, bytes, pnum ? pnum : &dummy,
2852                                             NULL, NULL, NULL);
2853     if (ret < 0) {
2854         return ret;
2855     }
2856     return !!(ret & BDRV_BLOCK_ALLOCATED);
2857 }
2858 
2859 /*
2860  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2861  *
2862  * Return a positive depth if (a prefix of) the given range is allocated
2863  * in any image between BASE and TOP (BASE is only included if include_base
2864  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2865  * BASE can be NULL to check if the given offset is allocated in any
2866  * image of the chain.  Return 0 otherwise, or negative errno on
2867  * failure.
2868  *
2869  * 'pnum' is set to the number of bytes (including and immediately
2870  * following the specified offset) that are known to be in the same
2871  * allocated/unallocated state.  Note that a subsequent call starting
2872  * at 'offset + *pnum' may return the same allocation status (in other
2873  * words, the result is not necessarily the maximum possible range);
2874  * but 'pnum' will only be 0 when end of file is reached.
2875  */
2876 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
2877                                             BlockDriverState *base,
2878                                             bool include_base, int64_t offset,
2879                                             int64_t bytes, int64_t *pnum)
2880 {
2881     int depth;
2882     int ret;
2883     IO_CODE();
2884 
2885     ret = bdrv_co_common_block_status_above(bs, base, include_base,
2886                                             BDRV_WANT_ALLOCATED,
2887                                             offset, bytes, pnum, NULL, NULL,
2888                                             &depth);
2889     if (ret < 0) {
2890         return ret;
2891     }
2892 
2893     if (ret & BDRV_BLOCK_ALLOCATED) {
2894         return depth;
2895     }
2896     return 0;
2897 }
2898 
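/*
 * Example (editorial note): for the chain [BASE] -> [INTER1] -> [INTER2] ->
 * [TOP] above, a range allocated only in TOP yields 1, one allocated only
 * in INTER2 yields 2, one allocated only in INTER1 yields 3, and a range
 * allocated nowhere above BASE yields 0.
 */
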
2899 int coroutine_fn
2900 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2901 {
2902     BlockDriver *drv = bs->drv;
2903     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2904     int ret;
2905     IO_CODE();
2906     assert_bdrv_graph_readable();
2907 
2908     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2909     if (ret < 0) {
2910         return ret;
2911     }
2912 
2913     if (!drv) {
2914         return -ENOMEDIUM;
2915     }
2916 
2917     bdrv_inc_in_flight(bs);
2918 
2919     if (drv->bdrv_co_load_vmstate) {
2920         ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2921     } else if (child_bs) {
2922         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2923     } else {
2924         ret = -ENOTSUP;
2925     }
2926 
2927     bdrv_dec_in_flight(bs);
2928 
2929     return ret;
2930 }
2931 
2932 int coroutine_fn
2933 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2934 {
2935     BlockDriver *drv = bs->drv;
2936     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2937     int ret;
2938     IO_CODE();
2939     assert_bdrv_graph_readable();
2940 
2941     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2942     if (ret < 0) {
2943         return ret;
2944     }
2945 
2946     if (!drv) {
2947         return -ENOMEDIUM;
2948     }
2949 
2950     bdrv_inc_in_flight(bs);
2951 
2952     if (drv->bdrv_co_save_vmstate) {
2953         ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2954     } else if (child_bs) {
2955         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2956     } else {
2957         ret = -ENOTSUP;
2958     }
2959 
2960     bdrv_dec_in_flight(bs);
2961 
2962     return ret;
2963 }
2964 
2965 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2966                       int64_t pos, int size)
2967 {
2968     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2969     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2970     IO_CODE();
2971 
2972     return ret < 0 ? ret : size;
2973 }
2974 
2975 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2976                       int64_t pos, int size)
2977 {
2978     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2979     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2980     IO_CODE();
2981 
2982     return ret < 0 ? ret : size;
2983 }
2984 
2985 /**************************************************************/
2986 /* async I/Os */
2987 
2988 /**
2989  * Synchronously cancels an acb. Must be called with the BQL held and the acb
2990  * must be processed with the BQL held too (IOThreads are not allowed).
2991  *
2992  * Use bdrv_aio_cancel_async() instead when possible.
2993  */
2994 void bdrv_aio_cancel(BlockAIOCB *acb)
2995 {
2996     GLOBAL_STATE_CODE();
2997     qemu_aio_ref(acb);
2998     bdrv_aio_cancel_async(acb);
2999     AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
3000     qemu_aio_unref(acb);
3001 }
3002 
3003 /* Async version of aio cancel. The caller is not blocked if the acb implements
3004  * cancel_async; otherwise we do nothing and let the request complete normally.
3005  * In either case the completion callback must be called. */
3006 void bdrv_aio_cancel_async(BlockAIOCB *acb)
3007 {
3008     IO_CODE();
3009     if (acb->aiocb_info->cancel_async) {
3010         acb->aiocb_info->cancel_async(acb);
3011     }
3012 }
3013 
3014 /**************************************************************/
3015 /* Coroutine block device emulation */
3016 
3017 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
3018 {
3019     BdrvChild *primary_child = bdrv_primary_child(bs);
3020     BdrvChild *child;
3021     int current_gen;
3022     int ret = 0;
3023     IO_CODE();
3024 
3025     assert_bdrv_graph_readable();
3026     bdrv_inc_in_flight(bs);
3027 
3028     if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
3029         bdrv_is_sg(bs)) {
3030         goto early_exit;
3031     }
3032 
3033     qemu_mutex_lock(&bs->reqs_lock);
3034     current_gen = qatomic_read(&bs->write_gen);
3035 
3036     /* Wait until any previous flushes are completed */
3037     while (bs->active_flush_req) {
3038         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
3039     }
3040 
3041     /* Flushes reach this point in nondecreasing current_gen order.  */
3042     bs->active_flush_req = true;
3043     qemu_mutex_unlock(&bs->reqs_lock);
3044 
3045     /* Write back all layers by calling one driver function */
3046     if (bs->drv->bdrv_co_flush) {
3047         ret = bs->drv->bdrv_co_flush(bs);
3048         goto out;
3049     }
3050 
3051     /* Write back cached data to the OS even with cache=unsafe */
3052     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
3053     if (bs->drv->bdrv_co_flush_to_os) {
3054         ret = bs->drv->bdrv_co_flush_to_os(bs);
3055         if (ret < 0) {
3056             goto out;
3057         }
3058     }
3059 
3060     /* But don't actually force it to the disk with cache=unsafe */
3061     if (bs->open_flags & BDRV_O_NO_FLUSH) {
3062         goto flush_children;
3063     }
3064 
3065     /* Check if we really need to flush anything */
3066     if (bs->flushed_gen == current_gen) {
3067         goto flush_children;
3068     }
3069 
3070     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
3071     if (!bs->drv) {
3072         /* bs->drv->bdrv_co_flush() might have ejected the BDS
3073          * (even in case of apparent success) */
3074         ret = -ENOMEDIUM;
3075         goto out;
3076     }
3077     if (bs->drv->bdrv_co_flush_to_disk) {
3078         ret = bs->drv->bdrv_co_flush_to_disk(bs);
3079     } else if (bs->drv->bdrv_aio_flush) {
3080         BlockAIOCB *acb;
3081         CoroutineIOCompletion co = {
3082             .coroutine = qemu_coroutine_self(),
3083         };
3084 
3085         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3086         if (acb == NULL) {
3087             ret = -EIO;
3088         } else {
3089             qemu_coroutine_yield();
3090             ret = co.ret;
3091         }
3092     } else {
3093         /*
3094          * Some block drivers always operate in either writethrough or unsafe
3095          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
3096          * know how the server works (because the behaviour is hardcoded or
3097          * depends on server-side configuration), so we can't ensure that
3098          * everything is safe on disk. Returning an error doesn't work because
3099          * that would break guests even if the server operates in writethrough
3100          * mode.
3101          *
3102          * Let's hope the user knows what they're doing.
3103          */
3104         ret = 0;
3105     }
3106 
3107     if (ret < 0) {
3108         goto out;
3109     }
3110 
3111     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
3112      * in the case of cache=unsafe, so there are no useless flushes.
3113      */
3114 flush_children:
3115     ret = 0;
3116     QLIST_FOREACH(child, &bs->children, next) {
3117         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3118             int this_child_ret = bdrv_co_flush(child->bs);
3119             if (!ret) {
3120                 ret = this_child_ret;
3121             }
3122         }
3123     }
3124 
3125 out:
3126     /* Notify any pending flushes that we have completed */
3127     if (ret == 0) {
3128         bs->flushed_gen = current_gen;
3129     }
3130 
3131     qemu_mutex_lock(&bs->reqs_lock);
3132     bs->active_flush_req = false;
3133     /* Return value is ignored - it's ok if wait queue is empty */
3134     qemu_co_queue_next(&bs->flush_queue);
3135     qemu_mutex_unlock(&bs->reqs_lock);
3136 
3137 early_exit:
3138     bdrv_dec_in_flight(bs);
3139     return ret;
3140 }
3141 
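/*
 * Editorial note: the generation scheme above is what makes redundant
 * flushes cheap: bs->write_gen is bumped in bdrv_co_write_req_finish() for
 * every write request, and a successful flush records the generation it
 * covered in bs->flushed_gen, so a flush that finds flushed_gen ==
 * current_gen can skip the flush-to-disk step entirely.
 */
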
3142 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
3143                                   int64_t bytes)
3144 {
3145     BdrvTrackedRequest req;
3146     int ret;
3147     int64_t max_pdiscard;
3148     int head, tail, align;
3149     BlockDriverState *bs = child->bs;
3150     IO_CODE();
3151     assert_bdrv_graph_readable();
3152 
3153     if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
3154         return -ENOMEDIUM;
3155     }
3156 
3157     if (bdrv_has_readonly_bitmaps(bs)) {
3158         return -EPERM;
3159     }
3160 
3161     ret = bdrv_check_request(offset, bytes, NULL);
3162     if (ret < 0) {
3163         return ret;
3164     }
3165 
3166     /* Do nothing if disabled.  */
3167     if (!(bs->open_flags & BDRV_O_UNMAP)) {
3168         return 0;
3169     }
3170 
3171     if (!bs->drv->bdrv_co_pdiscard) {
3172         return 0;
3173     }
3174 
3175     /* Invalidate the cached block-status data range if this discard overlaps */
3176     bdrv_bsc_invalidate_range(bs, offset, bytes);
3177 
3178     /*
3179      * Discard is advisory, but some devices track and coalesce
3180      * unaligned requests, so we must pass everything down rather than
3181      * round here.  Still, most devices reject unaligned requests with
3182      * -EINVAL or -ENOTSUP, so we must fragment the request accordingly.
3183      */
3184     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3185     assert(align % bs->bl.request_alignment == 0);
3186     head = offset % align;
3187     tail = (offset + bytes) % align;
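
    /*
     * Illustrative example (hypothetical values): with request_alignment =
     * 512, align = 4096, offset = 5120 and bytes = 10240, we get head = 1024
     * and tail = 3072.  The loop below then issues a 3072-byte request up to
     * the aligned boundary at 8192, one aligned 4096-byte request, and
     * finally the unaligned 3072-byte tail.
     */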
3188 
3189     bdrv_inc_in_flight(bs);
3190     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3191 
3192     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3193     if (ret < 0) {
3194         goto out;
3195     }
3196 
3197     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
3198                                    align);
3199     assert(max_pdiscard >= bs->bl.request_alignment);
3200 
3201     while (bytes > 0) {
3202         int64_t num = bytes;
3203 
3204         if (head) {
3205             /* Make small requests to get to alignment boundaries. */
3206             num = MIN(bytes, align - head);
3207             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3208                 num %= bs->bl.request_alignment;
3209             }
3210             head = (head + num) % align;
3211             assert(num < max_pdiscard);
3212         } else if (tail) {
3213             if (num > align) {
3214                 /* Shorten the request to the last aligned cluster.  */
3215                 num -= tail;
3216             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3217                        tail > bs->bl.request_alignment) {
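                /*
                 * The remainder fits within one alignment chunk, but its end
                 * is not aligned to request_alignment: shorten this request
                 * so that only the small sub-request_alignment remainder is
                 * left over for the final iteration.
                 */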
3218                 tail %= bs->bl.request_alignment;
3219                 num -= tail;
3220             }
3221         }
3222         /* limit request size */
3223         if (num > max_pdiscard) {
3224             num = max_pdiscard;
3225         }
3226 
3227         if (!bs->drv) {
3228             ret = -ENOMEDIUM;
3229             goto out;
3230         }
3231 
3232         ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3233         if (ret && ret != -ENOTSUP) {
3234             if (ret == -EINVAL && (offset % align != 0 || num % align != 0)) {
3235                 /* Silently skip rejected unaligned head/tail requests */
3236             } else {
3237                 goto out; /* bail out */
3238             }
3239         }
3240 
3241         offset += num;
3242         bytes -= num;
3243     }
3244     ret = 0;
3245 out:
3246     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3247     tracked_request_end(&req);
3248     bdrv_dec_in_flight(bs);
3249     return ret;
3250 }
3251 
3252 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3253 {
3254     BlockDriver *drv = bs->drv;
3255     CoroutineIOCompletion co = {
3256         .coroutine = qemu_coroutine_self(),
3257     };
3258     BlockAIOCB *acb;
3259     IO_CODE();
3260     assert_bdrv_graph_readable();
3261 
3262     bdrv_inc_in_flight(bs);
3263     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3264         co.ret = -ENOTSUP;
3265         goto out;
3266     }
3267 
3268     if (drv->bdrv_co_ioctl) {
3269         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3270     } else {
3271         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3272         if (!acb) {
3273             co.ret = -ENOTSUP;
3274             goto out;
3275         }
3276         qemu_coroutine_yield();
3277     }
3278 out:
3279     bdrv_dec_in_flight(bs);
3280     return co.ret;
3281 }
3282 
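/*
 * The zoned-device operations below forward to optional driver callbacks
 * and return -ENOTSUP when the driver does not implement them or when @bs
 * is not a zoned device (bs->bl.zoned == BLK_Z_NONE).
 */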
3283 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
3284                         unsigned int *nr_zones,
3285                         BlockZoneDescriptor *zones)
3286 {
3287     BlockDriver *drv = bs->drv;
3288     CoroutineIOCompletion co = {
3289             .coroutine = qemu_coroutine_self(),
3290     };
3291     IO_CODE();
3292 
3293     bdrv_inc_in_flight(bs);
3294     if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
3295         co.ret = -ENOTSUP;
3296         goto out;
3297     }
3298     co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
3299 out:
3300     bdrv_dec_in_flight(bs);
3301     return co.ret;
3302 }
3303 
3304 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
3305         int64_t offset, int64_t len)
3306 {
3307     BlockDriver *drv = bs->drv;
3308     CoroutineIOCompletion co = {
3309             .coroutine = qemu_coroutine_self(),
3310     };
3311     IO_CODE();
3312 
3313     bdrv_inc_in_flight(bs);
3314     if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
3315         co.ret = -ENOTSUP;
3316         goto out;
3317     }
3318     co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
3319 out:
3320     bdrv_dec_in_flight(bs);
3321     return co.ret;
3322 }
3323 
3324 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
3325                         QEMUIOVector *qiov,
3326                         BdrvRequestFlags flags)
3327 {
3328     int ret;
3329     BlockDriver *drv = bs->drv;
3330     CoroutineIOCompletion co = {
3331             .coroutine = qemu_coroutine_self(),
3332     };
3333     IO_CODE();
3334 
3335     ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
3336     if (ret < 0) {
3337         return ret;
3338     }
3339 
3340     bdrv_inc_in_flight(bs);
3341     if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
3342         co.ret = -ENOTSUP;
3343         goto out;
3344     }
3345     co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
3346 out:
3347     bdrv_dec_in_flight(bs);
3348     return co.ret;
3349 }
3350 
3351 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3352 {
3353     IO_CODE();
3354     return qemu_memalign(bdrv_opt_mem_align(bs), size);
3355 }
3356 
3357 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3358 {
3359     IO_CODE();
3360     return memset(qemu_blockalign(bs, size), 0, size);
3361 }
3362 
3363 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3364 {
3365     size_t align = bdrv_opt_mem_align(bs);
3366     IO_CODE();
3367 
3368     /* Ensure that NULL is never returned on success */
3369     assert(align > 0);
3370     if (size == 0) {
3371         size = align;
3372     }
3373 
3374     return qemu_try_memalign(align, size);
3375 }
3376 
3377 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3378 {
3379     void *mem = qemu_try_blockalign(bs, size);
3380     IO_CODE();
3381 
3382     if (mem) {
3383         memset(mem, 0, size);
3384     }
3385 
3386     return mem;
3387 }
3388 
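/*
 * Illustrative usage of the allocation helpers above (a sketch, not code
 * from this file): callers pair them with qemu_vfree(), e.g.
 *
 *     uint8_t *bounce = qemu_try_blockalign(bs, len);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...aligned I/O into @bounce...
 *     qemu_vfree(bounce);
 */
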
3389 /* Helper that undoes bdrv_register_buf() when it fails partway through */
3390 static void GRAPH_RDLOCK
3391 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3392                            BdrvChild *final_child)
3393 {
3394     BdrvChild *child;
3395 
3396     GLOBAL_STATE_CODE();
3397     assert_bdrv_graph_readable();
3398 
3399     QLIST_FOREACH(child, &bs->children, next) {
3400         if (child == final_child) {
3401             break;
3402         }
3403 
3404         bdrv_unregister_buf(child->bs, host, size);
3405     }
3406 
3407     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3408         bs->drv->bdrv_unregister_buf(bs, host, size);
3409     }
3410 }
3411 
3412 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3413                        Error **errp)
3414 {
3415     BdrvChild *child;
3416 
3417     GLOBAL_STATE_CODE();
3418     GRAPH_RDLOCK_GUARD_MAINLOOP();
3419 
3420     if (bs->drv && bs->drv->bdrv_register_buf) {
3421         if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3422             return false;
3423         }
3424     }
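    /*
     * Register with all children as well; if any child fails, unwind both
     * the children registered so far and this node's own driver registration
     * via bdrv_register_buf_rollback().
     */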
3425     QLIST_FOREACH(child, &bs->children, next) {
3426         if (!bdrv_register_buf(child->bs, host, size, errp)) {
3427             bdrv_register_buf_rollback(bs, host, size, child);
3428             return false;
3429         }
3430     }
3431     return true;
3432 }
3433 
3434 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
3435 {
3436     BdrvChild *child;
3437 
3438     GLOBAL_STATE_CODE();
3439     GRAPH_RDLOCK_GUARD_MAINLOOP();
3440 
3441     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3442         bs->drv->bdrv_unregister_buf(bs, host, size);
3443     }
3444     QLIST_FOREACH(child, &bs->children, next) {
3445         bdrv_unregister_buf(child->bs, host, size);
3446     }
3447 }
3448 
3449 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3450         BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3451         int64_t dst_offset, int64_t bytes,
3452         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3453         bool recurse_src)
3454 {
3455     BdrvTrackedRequest req;
3456     int ret;
3457     assert_bdrv_graph_readable();
3458 
3459     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3460     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3461     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3462     assert(!(read_flags & BDRV_REQ_NO_WAIT));
3463     assert(!(write_flags & BDRV_REQ_NO_WAIT));
3464 
3465     if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3466         return -ENOMEDIUM;
3467     }
3468     ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3469     if (ret) {
3470         return ret;
3471     }
3472     if (write_flags & BDRV_REQ_ZERO_WRITE) {
3473         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3474     }
3475 
3476     if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3477         return -ENOMEDIUM;
3478     }
3479     ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3480     if (ret) {
3481         return ret;
3482     }
3483 
3484     if (!src->bs->drv->bdrv_co_copy_range_from
3485         || !dst->bs->drv->bdrv_co_copy_range_to
3486         || src->bs->encrypted || dst->bs->encrypted) {
3487         return -ENOTSUP;
3488     }
3489 
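    /*
     * The request is tracked on only one node at a time: with recurse_src
     * set (the _from variant) it is accounted as a read on the source,
     * otherwise (the _to variant) as a write on the destination.
     */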
3490     if (recurse_src) {
3491         bdrv_inc_in_flight(src->bs);
3492         tracked_request_begin(&req, src->bs, src_offset, bytes,
3493                               BDRV_TRACKED_READ);
3494 
3495         /* BDRV_REQ_SERIALISING is only for write operations */
3496         assert(!(read_flags & BDRV_REQ_SERIALISING));
3497         bdrv_wait_serialising_requests(&req);
3498 
3499         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3500                                                     src, src_offset,
3501                                                     dst, dst_offset,
3502                                                     bytes,
3503                                                     read_flags, write_flags);
3504 
3505         tracked_request_end(&req);
3506         bdrv_dec_in_flight(src->bs);
3507     } else {
3508         bdrv_inc_in_flight(dst->bs);
3509         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3510                               BDRV_TRACKED_WRITE);
3511         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3512                                         write_flags);
3513         if (!ret) {
3514             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3515                                                       src, src_offset,
3516                                                       dst, dst_offset,
3517                                                       bytes,
3518                                                       read_flags, write_flags);
3519         }
3520         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3521         tracked_request_end(&req);
3522         bdrv_dec_in_flight(dst->bs);
3523     }
3524 
3525     return ret;
3526 }
3527 
3528 /* Copy range from @src to @dst.
3529  *
3530  * See the comment on bdrv_co_copy_range() for the parameter and return
3531  * value semantics. */
3532 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3533                                          BdrvChild *dst, int64_t dst_offset,
3534                                          int64_t bytes,
3535                                          BdrvRequestFlags read_flags,
3536                                          BdrvRequestFlags write_flags)
3537 {
3538     IO_CODE();
3539     assert_bdrv_graph_readable();
3540     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3541                                   read_flags, write_flags);
3542     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3543                                        bytes, read_flags, write_flags, true);
3544 }
3545 
3546 /* Copy range from @src to @dst.
3547  *
3548  * See the comment on bdrv_co_copy_range() for the parameter and return
3549  * value semantics. */
3550 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3551                                        BdrvChild *dst, int64_t dst_offset,
3552                                        int64_t bytes,
3553                                        BdrvRequestFlags read_flags,
3554                                        BdrvRequestFlags write_flags)
3555 {
3556     IO_CODE();
3557     assert_bdrv_graph_readable();
3558     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3559                                 read_flags, write_flags);
3560     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3561                                        bytes, read_flags, write_flags, false);
3562 }
3563 
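/*
 * Convenience wrapper that behaves like bdrv_co_copy_range_from(), i.e. the
 * request is tracked as a read on @src.
 */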
3564 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3565                                     BdrvChild *dst, int64_t dst_offset,
3566                                     int64_t bytes, BdrvRequestFlags read_flags,
3567                                     BdrvRequestFlags write_flags)
3568 {
3569     IO_CODE();
3570     assert_bdrv_graph_readable();
3571 
3572     return bdrv_co_copy_range_from(src, src_offset,
3573                                    dst, dst_offset,
3574                                    bytes, read_flags, write_flags);
3575 }
3576 
3577 static void coroutine_fn GRAPH_RDLOCK
3578 bdrv_parent_cb_resize(BlockDriverState *bs)
3579 {
3580     BdrvChild *c;
3581 
3582     assert_bdrv_graph_readable();
3583 
3584     QLIST_FOREACH(c, &bs->parents, next_parent) {
3585         if (c->klass->resize) {
3586             c->klass->resize(c);
3587         }
3588     }
3589 }
3590 
3591 /**
3592  * Truncate file to 'offset' bytes (needed only for file protocols)
3593  *
3594  * If 'exact' is true, the file must be resized to exactly the given
3595  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3596  * 'offset' bytes in length.
3597  */
3598 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3599                                   PreallocMode prealloc, BdrvRequestFlags flags,
3600                                   Error **errp)
3601 {
3602     BlockDriverState *bs = child->bs;
3603     BdrvChild *filtered, *backing;
3604     BlockDriver *drv = bs->drv;
3605     BdrvTrackedRequest req;
3606     int64_t old_size, new_bytes;
3607     int ret;
3608     IO_CODE();
3609     assert_bdrv_graph_readable();
3610 
3611     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3612     if (!drv) {
3613         error_setg(errp, "No medium inserted");
3614         return -ENOMEDIUM;
3615     }
3616     if (offset < 0) {
3617         error_setg(errp, "Image size cannot be negative");
3618         return -EINVAL;
3619     }
3620 
3621     ret = bdrv_check_request(offset, 0, errp);
3622     if (ret < 0) {
3623         return ret;
3624     }
3625 
3626     old_size = bdrv_co_getlength(bs);
3627     if (old_size < 0) {
3628         error_setg_errno(errp, -old_size, "Failed to get old image size");
3629         return old_size;
3630     }
3631 
3632     if (bdrv_is_read_only(bs)) {
3633         error_setg(errp, "Image is read-only");
3634         return -EACCES;
3635     }
3636 
3637     if (offset > old_size) {
3638         new_bytes = offset - old_size;
3639     } else {
3640         new_bytes = 0;
3641     }
3642 
3643     bdrv_inc_in_flight(bs);
3644     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3645                           BDRV_TRACKED_TRUNCATE);
3646 
3647     /* If we are growing the image and potentially using preallocation for the
3648      * new area, we need to make sure that no write requests are made to it
3649      * concurrently or they might be overwritten by preallocation. */
3650     if (new_bytes) {
3651         bdrv_make_request_serialising(&req, 1);
3652     }
3653     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3654                                     0);
3655     if (ret < 0) {
3656         error_setg_errno(errp, -ret,
3657                          "Failed to prepare request for truncation");
3658         goto out;
3659     }
3660 
3661     filtered = bdrv_filter_child(bs);
3662     backing = bdrv_cow_child(bs);
3663 
3664     /*
3665      * If the image has a backing file that is large enough that it would
3666      * provide data for the new area, we cannot leave it unallocated because
3667      * then the backing file content would become visible. Instead, zero-fill
3668      * the new area.
3669      *
3670      * Note that if the image has a backing file, but was opened without the
3671      * backing file, taking care of keeping things consistent with that backing
3672      * file is the user's responsibility.
3673      */
3674     if (new_bytes && backing) {
3675         int64_t backing_len;
3676 
3677         backing_len = bdrv_co_getlength(backing->bs);
3678         if (backing_len < 0) {
3679             ret = backing_len;
3680             error_setg_errno(errp, -ret, "Could not get backing file size");
3681             goto out;
3682         }
3683 
3684         if (backing_len > old_size) {
3685             flags |= BDRV_REQ_ZERO_WRITE;
3686         }
3687     }
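
    /*
     * Illustrative example (hypothetical sizes): growing a 1 GiB overlay
     * image to 3 GiB on top of a 2 GiB backing file must zero-fill the new
     * area; otherwise the backing file's second gigabyte would become
     * visible to the guest in the range [1 GiB, 2 GiB).
     */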
3688 
3689     if (drv->bdrv_co_truncate) {
3690         if (flags & ~bs->supported_truncate_flags) {
3691             error_setg(errp, "Block driver does not support requested flags");
3692             ret = -ENOTSUP;
3693             goto out;
3694         }
3695         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3696     } else if (filtered) {
3697         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3698     } else {
3699         error_setg(errp, "Image format driver does not support resize");
3700         ret = -ENOTSUP;
3701         goto out;
3702     }
3703     if (ret < 0) {
3704         goto out;
3705     }
3706 
3707     ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3708     if (ret < 0) {
3709         error_setg_errno(errp, -ret, "Could not refresh total sector count");
3710     } else {
3711         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3712     }
3713     /*
3714      * It's possible that truncation succeeded but bdrv_refresh_total_sectors
3715      * failed; the latter doesn't affect how we should finish the request.
3716      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
3717      */
3718     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3719 
3720 out:
3721     tracked_request_end(&req);
3722     bdrv_dec_in_flight(bs);
3723 
3724     return ret;
3725 }
3726 
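/*
 * Cancel requests that are still in flight on @bs, if the driver implements
 * bdrv_cancel_in_flight; otherwise this is a no-op.
 */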
3727 void bdrv_cancel_in_flight(BlockDriverState *bs)
3728 {
3729     GLOBAL_STATE_CODE();
3730     GRAPH_RDLOCK_GUARD_MAINLOOP();
3731 
3732     if (!bs || !bs->drv) {
3733         return;
3734     }
3735 
3736     if (bs->drv->bdrv_cancel_in_flight) {
3737         bs->drv->bdrv_cancel_in_flight(bs);
3738     }
3739 }
3740 
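/*
 * The *_snapshot helpers below simply forward to the corresponding optional
 * driver callbacks, returning -ENOMEDIUM without a driver and -ENOTSUP when
 * the driver does not implement the callback.
 */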
3741 int coroutine_fn
3742 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3743                         QEMUIOVector *qiov, size_t qiov_offset)
3744 {
3745     BlockDriverState *bs = child->bs;
3746     BlockDriver *drv = bs->drv;
3747     int ret;
3748     IO_CODE();
3749     assert_bdrv_graph_readable();
3750 
3751     if (!drv) {
3752         return -ENOMEDIUM;
3753     }
3754 
3755     if (!drv->bdrv_co_preadv_snapshot) {
3756         return -ENOTSUP;
3757     }
3758 
3759     bdrv_inc_in_flight(bs);
3760     ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3761     bdrv_dec_in_flight(bs);
3762 
3763     return ret;
3764 }
3765 
3766 int coroutine_fn
3767 bdrv_co_snapshot_block_status(BlockDriverState *bs, unsigned int mode,
3768                               int64_t offset, int64_t bytes,
3769                               int64_t *pnum, int64_t *map,
3770                               BlockDriverState **file)
3771 {
3772     BlockDriver *drv = bs->drv;
3773     int ret;
3774     IO_CODE();
3775     assert_bdrv_graph_readable();
3776 
3777     if (!drv) {
3778         return -ENOMEDIUM;
3779     }
3780 
3781     if (!drv->bdrv_co_snapshot_block_status) {
3782         return -ENOTSUP;
3783     }
3784 
3785     bdrv_inc_in_flight(bs);
3786     ret = drv->bdrv_co_snapshot_block_status(bs, mode, offset, bytes,
3787                                              pnum, map, file);
3788     bdrv_dec_in_flight(bs);
3789 
3790     return ret;
3791 }
3792 
3793 int coroutine_fn
3794 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3795 {
3796     BlockDriver *drv = bs->drv;
3797     int ret;
3798     IO_CODE();
3799     assert_bdrv_graph_readable();
3800 
3801     if (!drv) {
3802         return -ENOMEDIUM;
3803     }
3804 
3805     if (!drv->bdrv_co_pdiscard_snapshot) {
3806         return -ENOTSUP;
3807     }
3808 
3809     bdrv_inc_in_flight(bs);
3810     ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3811     bdrv_dec_in_flight(bs);
3812 
3813     return ret;
3814 }
3815