/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/aio-wait.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "system/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"

static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

BlockJob *block_job_next_locked(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;
    GLOBAL_STATE_CODE();

    do {
        job = job_next_locked(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

BlockJob *block_job_get_locked(const char *id)
{
    Job *job = job_get_locked(id);
    GLOBAL_STATE_CODE();

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    } else {
        return NULL;
    }
}

BlockJob *block_job_get(const char *id)
{
    JOB_LOCK_GUARD();
    return block_job_get_locked(id);
}

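/*
 * Example (illustrative, not part of this file): a QMP handler such as
 * query-block-jobs can walk all user-visible block jobs while holding
 * the job mutex:
 *
 *     WITH_JOB_LOCK_GUARD() {
 *         BlockJob *bjob;
 *         for (bjob = block_job_next_locked(NULL); bjob;
 *              bjob = block_job_next_locked(bjob)) {
 *             ...inspect bjob, e.g. via block_job_query_locked()...
 *         }
 *     }
 */
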
void block_job_free(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();

    block_job_remove_all_bdrv(bjob);
    ratelimit_destroy(&bjob->limit);
    error_free(bjob->blocker);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_pause(&job->job);
}

static bool child_job_drained_poll(BdrvChild *c)
{
    BlockJob *bjob = c->opaque;
    Job *job = &bjob->job;
    const BlockJobDriver *drv = block_job_driver(bjob);

    /* An inactive or completed job doesn't have any pending requests. Jobs
     * with !job->busy are either already paused or have a pause point after
     * being reentered, so no job driver code will run before they pause. */
    WITH_JOB_LOCK_GUARD() {
        if (!job->busy || job_is_completed_locked(job)) {
            return false;
        }
    }

    /* Otherwise, assume that it isn't fully stopped yet, but allow the job to
     * override this assumption. */
    if (drv->drained_poll) {
        return drv->drained_poll(bjob);
    } else {
        return true;
    }
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    job_resume(&job->job);
}

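/*
 * The three drained_* callbacks above are invoked by the block layer when a
 * node the job is attached to enters or leaves a drained section. Roughly,
 * for a caller of the drain API:
 *
 *     bdrv_drained_begin(bs);    triggers child_job_drained_begin(): job_pause()
 *     ...no job driver code runs for bs here...
 *     bdrv_drained_end(bs);      triggers child_job_drained_end(): job_resume()
 *
 * with child_job_drained_poll() polled in between until the job has really
 * stopped issuing requests.
 */
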
typedef struct BdrvStateChildJobContext {
    AioContext *new_ctx;
    BlockJob *job;
} BdrvStateChildJobContext;

static void child_job_set_aio_ctx_commit(void *opaque)
{
    BdrvStateChildJobContext *s = opaque;
    BlockJob *job = s->job;

    job_set_aio_context(&job->job, s->new_ctx);
}

static TransactionActionDrv change_child_job_context = {
    .commit = child_job_set_aio_ctx_commit,
    .clean = g_free,
};

static bool GRAPH_RDLOCK
child_job_change_aio_ctx(BdrvChild *c, AioContext *ctx, GHashTable *visited,
                         Transaction *tran, Error **errp)
{
    BlockJob *job = c->opaque;
    BdrvStateChildJobContext *s;
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *sibling = l->data;
        if (!bdrv_child_change_aio_context(sibling, ctx, visited,
                                           tran, errp)) {
            return false;
        }
    }

    s = g_new(BdrvStateChildJobContext, 1);
    *s = (BdrvStateChildJobContext) {
        .new_ctx = ctx,
        .job = job,
    };

    tran_add(tran, &change_child_job_context, s);
    return true;
}

static AioContext *child_job_get_parent_aio_context(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    IO_CODE();
    JOB_LOCK_GUARD();

    return job->job.aio_context;
}

static const BdrvChildClass child_job = {
    .get_parent_desc    = child_job_get_parent_desc,
    .drained_begin      = child_job_drained_begin,
    .drained_poll       = child_job_drained_poll,
    .drained_end        = child_job_drained_end,
    .change_aio_ctx     = child_job_change_aio_ctx,
    .stay_at_node       = true,
    .get_parent_aio_context = child_job_get_parent_aio_context,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    /*
     * bdrv_root_unref_child() may reach child_job_change_aio_ctx(),
     * which will also traverse job->nodes, so consume the list one by
     * one to make sure that such a concurrent access does not attempt
     * to process an already freed BdrvChild.
     */
    bdrv_drain_all_begin();
    bdrv_graph_wrlock();
    while (job->nodes) {
        GSList *l = job->nodes;
        BdrvChild *c = l->data;

        job->nodes = l->next;

        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);

        g_slist_free_1(l);
    }
    bdrv_graph_wrunlock();
    bdrv_drain_all_end();
}

bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
{
    GSList *el;
    GLOBAL_STATE_CODE();

    for (el = job->nodes; el; el = el->next) {
        BdrvChild *c = el->data;
        if (c->bs == bs) {
            return true;
        }
    }

    return false;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;
    GLOBAL_STATE_CODE();

    bdrv_ref(bs);

    c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                               errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

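/*
 * Example (illustrative): a job driver that operates on more than one node
 * attaches the extra nodes after creating the job, typically taking no
 * permissions of its own while tolerating everything, so that it is only
 * kept informed of graph changes:
 *
 *     if (block_job_add_bdrv(&job->common, "target", target_bs,
 *                            0, BLK_PERM_ALL, errp) < 0) {
 *         goto fail;
 *     }
 *
 * ("target" and target_bs are placeholders; see the mirror and stream jobs
 * for real callers.)
 */
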
/* Called with job_mutex lock held. */
static void block_job_on_idle_locked(Notifier *n, void *opaque)
{
    aio_wait_kick();
}

bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return container_of(job->job.driver, BlockJobDriver, job_driver);
}

/* Assumes the job_mutex is held */
static bool job_timer_pending(Job *job)
{
    return timer_pending(&job->sleep_timer);
}

bool block_job_set_speed_locked(BlockJob *job, int64_t speed, Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);
    int64_t old_speed = job->speed;

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_SET_SPEED, errp) < 0) {
        return false;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "speed",
                   "a non-negative value");
        return false;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;

    if (drv->set_speed) {
        job_unlock();
        drv->set_speed(job, speed);
        job_lock();
    }

    if (speed && speed <= old_speed) {
        return true;
    }

    /* kick only if a timer is pending */
    job_enter_cond_locked(&job->job, job_timer_pending);

    return true;
}

static bool block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    JOB_LOCK_GUARD();
    return block_job_set_speed_locked(job, speed, errp);
}

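/*
 * A sketch of how the QMP block-job-set-speed command reaches this code
 * (the real handler lives in blockdev.c and has additional lookup and
 * error handling):
 *
 *     void qmp_block_job_set_speed(const char *device, int64_t speed,
 *                                  Error **errp)
 *     {
 *         JOB_LOCK_GUARD();
 *         BlockJob *job = block_job_get_locked(device);
 *         ...error out if job is missing or internal...
 *         block_job_set_speed_locked(job, speed, errp);
 *     }
 */
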
void block_job_change_locked(BlockJob *job, BlockJobChangeOptions *opts,
                             Error **errp)
{
    const BlockJobDriver *drv = block_job_driver(job);

    GLOBAL_STATE_CODE();

    if (job_apply_verb_locked(&job->job, JOB_VERB_CHANGE, errp)) {
        return;
    }

    if (drv->change) {
        job_unlock();
        drv->change(job, opts, errp);
        job_lock();
    } else {
        error_setg(errp, "Job type does not support change");
    }
}

void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n)
{
    IO_CODE();
    ratelimit_calculate_delay(&job->limit, n);
}

void block_job_ratelimit_sleep(BlockJob *job)
{
    uint64_t delay_ns;

    /*
     * Sleep at least once. If the job is reentered early, keep waiting until
     * we've waited for the full time that is necessary to keep the job at the
     * right speed.
     *
     * Make sure to recalculate the delay after each (possibly interrupted)
     * sleep because the speed can change while the job has yielded.
     */
    do {
        delay_ns = ratelimit_calculate_delay(&job->limit, 0);
        job_sleep_ns(&job->job, delay_ns);
    } while (delay_ns && !job_is_cancelled(&job->job));
}

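/*
 * Together, the two ratelimit helpers above give job drivers a simple
 * throttling pattern for their main coroutine loop (a sketch, based on how
 * the stream job uses them):
 *
 *     for (offset = 0; offset < len; offset += n) {
 *         block_job_ratelimit_sleep(job);
 *         n = ...process one chunk starting at offset...;
 *         block_job_ratelimit_processed_bytes(job, n);
 *     }
 *
 * block_job_ratelimit_sleep() waits out whatever delay earlier chunks
 * accumulated, and block_job_ratelimit_processed_bytes() accounts the new
 * chunk against the configured speed.
 */
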
BlockJobInfo *block_job_query_locked(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;
    uint64_t progress_current, progress_total;
    const BlockJobDriver *drv = block_job_driver(job);

    GLOBAL_STATE_CODE();

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    info = g_new0(BlockJobInfo, 1);
    info->type      = job_type(&job->job);
    info->device    = g_strdup(job->job.id);
    info->busy      = job->job.busy;
    info->paused    = job->job.pause_count > 0;
    info->offset    = progress_current;
    info->len       = progress_total;
    info->speed     = job->speed;
    info->io_status = job->iostatus;
    info->ready     = job_is_ready_locked(&job->job);
    info->status    = job->job.status;
    info->auto_finalize = job->job.auto_finalize;
    info->auto_dismiss  = job->job.auto_dismiss;
    if (job->job.ret) {
        info->error = job->job.err ?
                        g_strdup(error_get_pretty(job->job.err)) :
                        g_strdup(strerror(-job->job.ret));
    }
    if (drv->query) {
        job_unlock();
        drv->query(job, info);
        job_lock();
    }
    return info;
}

/* Called with job lock held */
static void block_job_iostatus_set_err_locked(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

/* Called with job_mutex lock held. */
static void block_job_event_cancelled_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_cancelled(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed);
}

/* Called with job_mutex lock held. */
static void block_job_event_completed_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    const char *msg = NULL;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    if (job->job.ret < 0) {
        msg = error_get_pretty(job->job.err);
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_completed(job_type(&job->job),
                                        job->job.id,
                                        progress_total,
                                        progress_current,
                                        job->speed,
                                        msg);
}

/* Called with job_mutex lock held. */
static void block_job_event_pending_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_pending(job_type(&job->job),
                                      job->job.id);
}

/* Called with job_mutex lock held. */
static void block_job_event_ready_locked(Notifier *n, void *opaque)
{
    BlockJob *job = opaque;
    uint64_t progress_current, progress_total;

    if (block_job_is_internal(job)) {
        return;
    }

    progress_get_snapshot(&job->job.progress, &progress_current,
                          &progress_total);

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    progress_total,
                                    progress_current,
                                    job->speed);
}


void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockJob *job;
    int ret;
    GLOBAL_STATE_CODE();

    bdrv_drain_all_begin();
    bdrv_graph_wrlock();

    if (job_id == NULL && !(flags & JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
    }

    job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                     flags, cb, opaque, errp);
    if (job == NULL) {
        bdrv_graph_wrunlock();
        bdrv_drain_all_end();
        return NULL;
    }

    assert(is_block_job(&job->job));
    assert(job->job.driver->free == &block_job_free);
    assert(job->job.driver->user_resume == &block_job_user_resume);

    ratelimit_init(&job->limit);

    job->finalize_cancelled_notifier.notify = block_job_event_cancelled_locked;
    job->finalize_completed_notifier.notify = block_job_event_completed_locked;
    job->pending_notifier.notify = block_job_event_pending_locked;
    job->ready_notifier.notify = block_job_event_ready_locked;
    job->idle_notifier.notify = block_job_on_idle_locked;

    WITH_JOB_LOCK_GUARD() {
        notifier_list_add(&job->job.on_finalize_cancelled,
                          &job->finalize_cancelled_notifier);
        notifier_list_add(&job->job.on_finalize_completed,
                          &job->finalize_completed_notifier);
        notifier_list_add(&job->job.on_pending, &job->pending_notifier);
        notifier_list_add(&job->job.on_ready, &job->ready_notifier);
        notifier_list_add(&job->job.on_idle, &job->idle_notifier);
    }

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));

    ret = block_job_add_bdrv(job, "main node", bs, perm, shared_perm, errp);
    if (ret < 0) {
        goto fail;
    }

    if (!block_job_set_speed(job, speed, errp)) {
        goto fail;
    }

    bdrv_graph_wrunlock();
    bdrv_drain_all_end();
    return job;

fail:
    bdrv_graph_wrunlock();
    bdrv_drain_all_end();
    job_early_fail(&job->job);
    return NULL;
}

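/*
 * Example (illustrative): a job driver's start function would call
 * block_job_create() roughly like this, with MyBlockJob and my_job_driver
 * standing in for the driver's own hypothetical types:
 *
 *     MyBlockJob *s = block_job_create(job_id, &my_job_driver, NULL, bs,
 *                                      perm, shared_perm, speed,
 *                                      JOB_DEFAULT, NULL, NULL, errp);
 *     if (!s) {
 *         return;
 *     }
 *     ...attach further nodes with block_job_add_bdrv(), then
 *     job_start(&s->common.job)...
 */
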
void block_job_iostatus_reset_locked(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->job.user_paused && job->job.pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

static void block_job_iostatus_reset(BlockJob *job)
{
    JOB_LOCK_GUARD();
    block_job_iostatus_reset_locked(job);
}

void block_job_user_resume(Job *job)
{
    BlockJob *bjob = container_of(job, BlockJob, job);
    GLOBAL_STATE_CODE();
    block_job_iostatus_reset(bjob);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;
    IO_CODE();

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        WITH_JOB_LOCK_GUARD() {
            if (!job->job.user_paused) {
                job_pause_locked(&job->job);
                /*
                 * Make the pause user-visible so that it can be resumed
                 * from QMP.
                 */
                job->job.user_paused = true;
            }
            block_job_iostatus_set_err_locked(job, error);
        }
    }
    return action;
}

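/*
 * Example (illustrative, after the mirror and backup jobs): a driver
 * reacting to a failed request typically does
 *
 *     action = block_job_error_action(&job->common, on_error,
 *                                     is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         ...fail the job with ret...
 *     }
 *
 * For BLOCK_ERROR_ACTION_STOP, block_job_error_action() has already paused
 * the job, so the driver only needs to retry once the user resumes it.
 */
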
AioContext *block_job_get_aio_context(BlockJob *job)
{
    GLOBAL_STATE_CODE();
    return job->job.aio_context;
}