/* QEMU nbd/server.c (excerpts) */
#include "block/dirty-bitmap.h"
#include "nbd-internal.h"

bool allocation_depth; /* export qemu:allocation-depth */
bool *bitmaps; /* export qemu:dirty-bitmap:<export bitmap name>,
                * sized by exp->nr_export_bitmaps */
uint32_t check_align; /* If non-zero, check for aligned client requests */
/* set_be_option_rep() */
stq_be_p(&rep->magic, NBD_REP_MAGIC);
stl_be_p(&rep->option, option);
stl_be_p(&rep->type, type);
stl_be_p(&rep->length, length);

/* nbd_negotiate_send_rep_len(): Return -errno on error, 0 on success. */
trace_nbd_negotiate_send_rep_len(client->opt, nbd_opt_lookup(client->opt),
set_be_option_rep(&rep, client->opt, type, len);
return nbd_write(client->ioc, &rep, sizeof(rep), errp);
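
/*
 * Sketch (not from server.c): the four stores above fill the fixed
 * option-reply header that prefixes every negotiation reply. A minimal
 * standalone illustration of that wire layout per the NBD newstyle spec;
 * this struct and main() are hypothetical, not QEMU's definitions.
 */
#include <stdint.h>
#include <stdio.h>

struct nbd_option_reply {
    uint64_t magic;   /* NBD_REP_MAGIC, 0x3e889045565a9 per the NBD spec */
    uint32_t option;  /* echoes the client's NBD_OPT_* value */
    uint32_t type;    /* NBD_REP_ACK, NBD_REP_SERVER, NBD_REP_ERR_*, ... */
    uint32_t length;  /* bytes of payload that follow this header */
} __attribute__((packed));

int main(void)
{
    /* 8 + 4 + 4 + 4 = 20 bytes precede any option reply payload */
    printf("option reply header: %zu bytes\n",
           sizeof(struct nbd_option_reply));
    return 0;
}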
/* Return -errno on error, 0 on success. */

/* Return -errno on error, 0 on success. */
/* nbd_negotiate_send_rep_verr() */
if (nbd_write(client->ioc, msg, len, errp) < 0) {
    return -EIO;

/* Return -errno on error, 0 on success. */

/* nbd_opt_vdrop(): drop the option's remaining payload, then reply with the
 * given error type and message. Return -errno on read or write failure. */
int ret = nbd_drop(client->ioc, client->optlen, errp);
client->optlen = 0;
/* nbd_opt_read(): Return -errno on I/O error, 0 if option was completely
 * handled by sending a reply about inconsistent lengths, or 1 on success. */
if (size > client->optlen) {
    nbd_opt_lookup(client->opt));
client->optlen -= size;
if (qio_channel_read_all(client->ioc, buffer, size, errp) < 0) {
    return -EIO;
nbd_opt_lookup(client->opt));

/* nbd_opt_skip(): Return -errno on I/O error, 0 if option was completely
 * handled by sending a reply about inconsistent lengths, or 1 on success. */
if (size > client->optlen) {
    nbd_opt_lookup(client->opt));
client->optlen -= size;
return nbd_drop(client->ioc, size, errp) < 0 ? -EIO : 1;
/* nbd_opt_read_name(): read a uint32_t len followed by
 * len bytes string (not 0-terminated).
 * If @length is non-null, it will be set to the actual string length.
 * Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */

/* Return -errno on error, 0 on success. */
/* nbd_negotiate_send_rep_list() */
const char *name = exp->name ? exp->name : "";
const char *desc = exp->description ? exp->description : "";
QIOChannel *ioc = client->ioc;
return -EINVAL;
return -EINVAL;
return -EINVAL;
/* nbd_negotiate_handle_list(): Return -errno on error, 0 on success. */
assert(client->opt == NBD_OPT_LIST);
return -EINVAL;

/* nbd_check_meta_export() */
if (exp != client->contexts.exp) {
    client->contexts.count = 0;
/* nbd_negotiate_handle_export_name(): Return -errno on error, 0 on success. */
if (client->mode >= NBD_MODE_EXTENDED) {
    return -EINVAL;
if (client->optlen > NBD_MAX_STRING_SIZE) {
    return -EINVAL;
name = g_malloc(client->optlen + 1);
if (nbd_read(client->ioc, name, client->optlen, "export name", errp) < 0) {
    return -EIO;
name[client->optlen] = '\0';
client->optlen = 0;
client->exp = nbd_export_find(name);
if (!client->exp) {
    return -EINVAL;
nbd_check_meta_export(client, client->exp);
myflags = client->exp->nbdflags;
if (client->mode >= NBD_MODE_STRUCTURED) {
if (client->mode >= NBD_MODE_EXTENDED && client->contexts.count) {
trace_nbd_negotiate_new_style_size_flags(client->exp->size, myflags);
stq_be_p(buf, client->exp->size);
ret = nbd_write(client->ioc, buf, len, errp);
QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
blk_exp_ref(&client->exp->common);
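
/*
 * Sketch (not from server.c): on success, NBD_OPT_EXPORT_NAME is answered
 * with the export size and transmission flags, padded with 124 zero bytes
 * unless NBD_FLAG_NO_ZEROES was negotiated; the stq_be_p()/len handling
 * above implements that. A hypothetical standalone version per the NBD
 * spec (the helpers stand in for QEMU's stq_be_p()/stw_be_p()):
 */
#include <stdint.h>
#include <string.h>

static void put_be64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = (uint8_t)(v >> (56 - 8 * i));
    }
}

static void put_be16(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v >> 8);
    p[1] = (uint8_t)v;
}

/* Returns the number of bytes to send: 10, or 134 with the zero padding. */
static size_t fill_export_name_reply(uint8_t buf[134], uint64_t size,
                                     uint16_t flags, int no_zeroes)
{
    put_be64(buf, size);        /* [0..7]    export size */
    put_be16(buf + 8, flags);   /* [8..9]    transmission flags */
    if (no_zeroes) {
        return 10;
    }
    memset(buf + 10, 0, 124);   /* [10..133] reserved, must be zero */
    return 134;
}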
/* nbd_negotiate_send_info(): Return -errno on error, 0 if ready to send more. */
if (nbd_write(client->ioc, &info, sizeof(info), errp) < 0) {
    return -EIO;
if (nbd_write(client->ioc, buf, length, errp) < 0) {
    return -EIO;
/* nbd_reject_length(): Return -errno if a transmission error occurred or
 * @fatal was requested (errp is set), 0 if an error reply was sent. */
assert(client->optlen);
nbd_opt_lookup(client->opt));
nbd_opt_lookup(client->opt));
return -EINVAL;
/* nbd_negotiate_handle_info(): Return -errno on error, 0 if ready for the
 * next option, and 1 to move into transmission phase. */
while (requests--) {
if (client->optlen) {
if (client->opt == NBD_OPT_GO) {
if (exp->description) {
    size_t len = strlen(exp->description);
    len, exp->description, errp);
/* minimum - 1 for back-compat, or actual if client will obey it. */
if (client->opt == NBD_OPT_INFO || blocksize) {
    check_align = sizes[0] = blk_get_request_alignment(exp->common.blk);
/* preferred - Hard-code to 4096 for now.
 * TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */
/* maximum - At most 32M, but smaller as appropriate. */
sizes[2] = MIN(blk_get_max_transfer(exp->common.blk), NBD_MAX_BUFFER_SIZE);
myflags = exp->nbdflags;
if (client->mode >= NBD_MODE_STRUCTURED) {
if (client->mode >= NBD_MODE_EXTENDED &&
    (client->contexts.count || client->opt == NBD_OPT_INFO)) {
trace_nbd_negotiate_new_style_size_flags(exp->size, myflags);
stq_be_p(buf, exp->size);
if (client->opt == NBD_OPT_INFO && !blocksize &&
    blk_get_request_alignment(exp->common.blk) > 1) {
if (client->opt == NBD_OPT_GO) {
    client->exp = exp;
    client->check_align = check_align;
    QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
    blk_exp_ref(&client->exp->common);
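
/*
 * Sketch (not from server.c): the three sizes computed above travel in an
 * NBD_INFO_BLOCK_SIZE payload inside an NBD_REP_INFO reply. A hypothetical
 * mirror of that 14-byte payload per the NBD spec; all fields big-endian
 * on the wire.
 */
#include <stdint.h>

struct nbd_info_block_size {
    uint16_t info;       /* NBD_INFO_BLOCK_SIZE (3) */
    uint32_t minimum;    /* sizes[0]: request alignment, 1 for back-compat */
    uint32_t preferred;  /* sizes[1]: hard-coded 4096 above */
    uint32_t maximum;    /* sizes[2]: MIN(max transfer, NBD_MAX_BUFFER_SIZE) */
} __attribute__((packed));

_Static_assert(sizeof(struct nbd_info_block_size) == 14, "wire size");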
/* nbd_server_tls_handshake() */
qio_task_propagate_error(task, &data->error);
data->complete = true;
if (!qemu_coroutine_entered(data->co)) {
    aio_co_wake(data->co);

/* nbd_negotiate_handle_starttls(): handle NBD_OPT_STARTTLS, returning the
 * new channel for all further (now-encrypted) communication. */
assert(client->opt == NBD_OPT_STARTTLS);
ioc = client->ioc;
client->tlscreds,
client->tlsauthz,
qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-server-tls");
/* nbd_negotiate_send_meta_context() */
if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
set_be_option_rep(&opt.h, client->opt, NBD_REP_META_CONTEXT,
                  sizeof(opt) - sizeof(opt.h) + iov[1].iov_len);
return qio_channel_writev_all(client->ioc, iov, 2, errp) < 0 ? -EIO : 0;

/* nbd_meta_empty_or_pattern() */
return client->opt == NBD_OPT_LIST_META_CONTEXT;
/* nbd_meta_base_query() */
meta->base_allocation = true;

/* nbd_meta_qemu_query(): handle queries to the 'qemu' namespace. For now,
 * only the qemu:dirty-bitmap: and qemu:allocation-depth contexts are
 * available. Return true if @query has been handled. */
if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
    meta->allocation_depth = meta->exp->allocation_depth;
    if (meta->exp->nr_export_bitmaps) {
        memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
if (strcmp(query, "allocation-depth") == 0) {
    trace_nbd_negotiate_meta_query_parse("allocation-depth");
    meta->allocation_depth = meta->exp->allocation_depth;
if (nbd_strshift(&query, "dirty-bitmap:")) {
    trace_nbd_negotiate_meta_query_parse("dirty-bitmap:");
    if (client->opt == NBD_OPT_LIST_META_CONTEXT &&
        meta->exp->nr_export_bitmaps) {
        memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
    for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
        bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
        meta->bitmaps[i] = true;
    trace_nbd_negotiate_meta_query_skip("no dirty-bitmap match");
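
/*
 * Sketch (not from server.c): the namespace handling above boils down to
 * prefix matching plus shifting past the matched prefix. A self-contained
 * illustration with a local reimplementation of the idea behind QEMU's
 * nbd_strshift() (this helper and main() are hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* If *str starts with prefix, advance *str past it and return true. */
static bool strshift(const char **str, const char *prefix)
{
    size_t len = strlen(prefix);

    if (strncmp(*str, prefix, len) == 0) {
        *str += len;
        return true;
    }
    return false;
}

int main(void)
{
    const char *query = "dirty-bitmap:bitmap0";

    if (strcmp(query, "allocation-depth") == 0) {
        puts("qemu:allocation-depth requested");
    } else if (strshift(&query, "dirty-bitmap:")) {
        printf("bitmap name to look up: %s\n", query);  /* -> "bitmap0" */
    }
    return 0;
}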
/* Return -errno on I/O error, 0 if option was completely handled by
 * sending a reply about inconsistent lengths, or 1 on success. */

/* nbd_negotiate_meta_queries(): Return -errno on I/O error, or 0 if option
 * was completely handled. */
if (client->opt == NBD_OPT_SET_META_CONTEXT &&
    client->mode < NBD_MODE_STRUCTURED) {
    nbd_opt_lookup(client->opt));
if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
meta = &client->contexts;
g_free(meta->bitmaps);
meta->exp = nbd_export_find(export_name);
if (meta->exp == NULL) {
meta->bitmaps = g_new0(bool, meta->exp->nr_export_bitmaps);
if (client->opt == NBD_OPT_LIST_META_CONTEXT) {
    bitmaps = meta->bitmaps;
trace_nbd_negotiate_meta_context(nbd_opt_lookup(client->opt),
if (client->opt == NBD_OPT_LIST_META_CONTEXT && !nb_queries) {
    /* enable all known contexts */
    meta->base_allocation = true;
    meta->allocation_depth = meta->exp->allocation_depth;
    if (meta->exp->nr_export_bitmaps) {
        memset(meta->bitmaps, 1, meta->exp->nr_export_bitmaps);
if (meta->base_allocation) {
if (meta->allocation_depth) {
    ret = nbd_negotiate_send_meta_context(client, "qemu:allocation-depth",
for (i = 0; i < meta->exp->nr_export_bitmaps; i++) {
    if (!meta->bitmaps[i]) {
    bm_name = bdrv_dirty_bitmap_name(meta->exp->export_bitmaps[i]);
    context = g_strdup_printf("qemu:dirty-bitmap:%s", bm_name);
meta->count = count;
/* nbd_negotiate_options(): Return -errno on error (errp is set), 0 on
 * successful negotiation. */
/*
 * Intentionally ignore errors on this first read - we do not want ...
 */
if (nbd_read32(client->ioc, &flags, "flags", NULL) < 0) {
client->mode = NBD_MODE_EXPORT_NAME;
client->mode = NBD_MODE_SIMPLE;
return -EINVAL;
if (nbd_read64(client->ioc, &magic, "opts magic", errp) < 0) {
    return -EINVAL;
return -EINVAL;
if (nbd_read32(client->ioc, &option, "option", errp) < 0) {
    return -EINVAL;
client->opt = option;
if (nbd_read32(client->ioc, &length, "option length", errp) < 0) {
    return -EINVAL;
assert(!client->optlen);
client->optlen = length;
return -EINVAL;
if (client->tlscreds &&
    client->ioc == (QIOChannel *)client->sioc) {
    return -EINVAL;
return -EIO;
object_unref(OBJECT(client->ioc));
client->ioc = tioc;
return -EINVAL;
} else if (client->tlscreds) {
} else if (client->mode >= NBD_MODE_EXTENDED) {
} else if (client->mode >= NBD_MODE_STRUCTURED) {
client->mode = NBD_MODE_STRUCTURED;
} else if (client->mode >= NBD_MODE_EXTENDED) {
client->mode = NBD_MODE_EXTENDED;
/*
 * If broken new-style we should drop the connection ...
 */
return -EINVAL;
/* nbd_negotiate(): Return -errno on error (errp is set), 0 on success. */
/* Old-style negotiation header layout (excerpt):
 * [24 .. 27] export flags (zero-extended) */
qio_channel_set_blocking(client->ioc, false, NULL);
qio_channel_set_follow_coroutine_ctx(client->ioc, true);
if (nbd_write(client->ioc, buf, 18, NULL) < 0) {
assert(!client->optlen);
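
/*
 * Sketch (not from server.c): the 18-byte write above is the fixed
 * newstyle greeting: two 8-byte magics plus 2 bytes of handshake flags.
 * A hypothetical standalone version using constants from the NBD spec:
 */
#include <stdint.h>

#define NBD_INIT_MAGIC  0x4e42444d41474943ULL   /* ASCII "NBDMAGIC" */
#define NBD_OPTS_MAGIC  0x49484156454f5054ULL   /* ASCII "IHAVEOPT" */
#define NBD_FLAG_FIXED_NEWSTYLE  (1 << 0)
#define NBD_FLAG_NO_ZEROES       (1 << 1)

static void put_be64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = (uint8_t)(v >> (56 - 8 * i));
    }
}

static void fill_newstyle_greeting(uint8_t buf[18])
{
    put_be64(buf, NBD_INIT_MAGIC);      /* [ 0 ..  7] "NBDMAGIC" */
    put_be64(buf + 8, NBD_OPTS_MAGIC);  /* [ 8 .. 15] "IHAVEOPT" */
    buf[16] = 0;                        /* [16 .. 17] handshake flags */
    buf[17] = NBD_FLAG_FIXED_NEWSTYLE | NBD_FLAG_NO_ZEROES;
}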
/* nbd_read_eof() */
len = qio_channel_readv(client->ioc, &iov, 1, errp);
WITH_QEMU_LOCK_GUARD(&client->lock) {
    client->read_yielding = true;
    /* Prompt main loop thread to re-run nbd_drained_poll() */
qio_channel_yield(client->ioc, G_IO_IN);
WITH_QEMU_LOCK_GUARD(&client->lock) {
    client->read_yielding = false;
    if (client->quiescing) {
        return -EAGAIN;
return -EIO;
"Unexpected end-of-file before all bytes were read");
return -EIO;
size -= len;
/* nbd_receive_request() */
size_t size = client->mode >= NBD_MODE_EXTENDED ?
return -EIO;
request->flags = lduw_be_p(buf + 4);
request->type = lduw_be_p(buf + 6);
request->cookie = ldq_be_p(buf + 8);
request->from = ldq_be_p(buf + 16);
if (client->mode >= NBD_MODE_EXTENDED) {
    request->len = ldq_be_p(buf + 24);
} else {
    request->len = (uint32_t)ldl_be_p(buf + 24); /* widen 32 to 64 bits */
}
trace_nbd_receive_request(magic, request->flags, request->type,
                          request->from, request->len);
return -EINVAL;
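
/*
 * Sketch (not from server.c): the offsets above define the request header
 * layout. A hypothetical standalone parser mirroring them; compact requests
 * are 28 bytes with a 32-bit length, extended-header requests are 32 bytes
 * with a 64-bit length (and a different magic, per the extension spec):
 */
#include <stdint.h>

#define NBD_REQUEST_MAGIC 0x25609513U  /* compact request magic */

struct nbd_req {
    uint32_t magic;
    uint16_t flags;
    uint16_t type;
    uint64_t cookie;
    uint64_t from;
    uint64_t len;
};

static uint16_t get_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | p[3];
}

static uint64_t get_be64(const uint8_t *p)
{
    return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

static void parse_request(const uint8_t *buf, int extended,
                          struct nbd_req *r)
{
    r->magic  = get_be32(buf);
    r->flags  = get_be16(buf + 4);
    r->type   = get_be16(buf + 6);
    r->cookie = get_be64(buf + 8);
    r->from   = get_be64(buf + 16);
    /* widen 32 to 64 bits for compact requests, as above */
    r->len    = extended ? get_be64(buf + 24) : get_be32(buf + 24);
}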
/* nbd_client_get() */
qatomic_inc(&client->refcount);

/* nbd_client_put() */
if (qatomic_fetch_dec(&client->refcount) == 1) {
    /* The last reference should be dropped by client->close, ... */
    assert(client->closing);
    object_unref(OBJECT(client->sioc));
    object_unref(OBJECT(client->ioc));
    if (client->tlscreds) {
        object_unref(OBJECT(client->tlscreds));
    g_free(client->tlsauthz);
    if (client->exp) {
        QTAILQ_REMOVE(&client->exp->clients, client, next);
        blk_exp_unref(&client->exp->common);
    g_free(client->contexts.bitmaps);
    qemu_mutex_destroy(&client->lock);
/* nbd_client_put_nonzero() */
int old = qatomic_read(&client->refcount);
old = qatomic_cmpxchg(&client->refcount, expected, expected - 1);
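
/*
 * Sketch (not from server.c): the qatomic_read()/qatomic_cmpxchg() pair
 * above is the classic "drop a reference only if it is not the last one"
 * pattern. The same idea in portable C11 atomics:
 */
#include <stdatomic.h>
#include <stdbool.h>

/* Returns false when this would drop the last reference; the caller must
 * then fall back to a slow path that is allowed to run the destructor. */
static bool put_nonzero(atomic_int *refcount)
{
    int old = atomic_load(refcount);

    do {
        if (old == 1) {
            return false;
        }
        /* on failure, old is reloaded with the current value */
    } while (!atomic_compare_exchange_weak(refcount, &old, old - 1));
    return true;
}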
/* client_close() */
WITH_QEMU_LOCK_GUARD(&client->lock) {
    if (client->closing) {
    client->closing = true;
qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH,
if (client->close_fn) {
    client->close_fn(client, negotiated);
/* Runs in export AioContext with client->lock held */
/* nbd_request_get() */
assert(client->nb_requests <= MAX_NBD_REQUESTS - 1);
client->nb_requests++;
req->client = client;

/* Runs in export AioContext with client->lock held */
/* nbd_request_put() */
NBDClient *client = req->client;
if (req->data) {
    qemu_vfree(req->data);
client->nb_requests--;
if (client->quiescing && client->nb_requests == 0) {
/* blk_aio_attached() */
trace_nbd_blk_aio_attached(exp->name, ctx);
exp->common.ctx = ctx;
QTAILQ_FOREACH(client, &exp->clients, next) {
    WITH_QEMU_LOCK_GUARD(&client->lock) {
        assert(client->nb_requests == 0);
        assert(client->recv_coroutine == NULL);
        assert(client->send_coroutine == NULL);

/* blk_aio_detach() */
trace_nbd_blk_aio_detach(exp->name, exp->common.ctx);
exp->common.ctx = NULL;
/* nbd_drained_begin() */
QTAILQ_FOREACH(client, &exp->clients, next) {
    WITH_QEMU_LOCK_GUARD(&client->lock) {
        client->quiescing = true;

/* nbd_drained_end() */
QTAILQ_FOREACH(client, &exp->clients, next) {
    WITH_QEMU_LOCK_GUARD(&client->lock) {
        client->quiescing = false;

/* nbd_wake_read_bh() */
qio_channel_wake_read(client->ioc);

/* nbd_drained_poll() */
QTAILQ_FOREACH(client, &exp->clients, next) {
    WITH_QEMU_LOCK_GUARD(&client->lock) {
        if (client->nb_requests != 0) {
            if (client->recv_coroutine != NULL && client->read_yielding) {
                aio_bh_schedule_oneshot(nbd_export_aio_context(client->exp),
/* nbd_eject_notifier() */
blk_exp_request_shutdown(&exp->common);

/* nbd_export_set_on_eject_blk() */
assert(exp->drv == &blk_exp_nbd);
assert(nbd_exp->eject_notifier_blk == NULL);
nbd_exp->eject_notifier_blk = blk;
nbd_exp->eject_notifier.notify = nbd_eject_notifier;
blk_add_remove_bs_notifier(blk, &nbd_exp->eject_notifier);
/* nbd_export_create() */
BlockExportOptionsNbd *arg = &exp_args->u.nbd;
const char *name = arg->name ?: exp_args->node_name;
BlockBackend *blk = blk_exp->blk;
bool readonly = !exp_args->writable;
assert(exp_args->type == BLOCK_EXPORT_TYPE_NBD);
return -EINVAL;
return -EINVAL;
if (arg->description && strlen(arg->description) > NBD_MAX_STRING_SIZE) {
    error_setg(errp, "description '%s' too long", arg->description);
    return -EINVAL;
return -EEXIST;
error_setg_errno(errp, -size,
QTAILQ_INIT(&exp->clients);
exp->name = g_strdup(name);
exp->description = g_strdup(arg->description);
exp->nbdflags = (NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_FLUSH |
exp->nbdflags |= NBD_FLAG_CAN_MULTI_CONN;
exp->nbdflags |= NBD_FLAG_READ_ONLY;
exp->nbdflags |= (NBD_FLAG_SEND_TRIM | NBD_FLAG_SEND_WRITE_ZEROES |
exp->size = QEMU_ALIGN_DOWN(size, BDRV_SECTOR_SIZE);
for (bitmaps = arg->bitmaps; bitmaps; bitmaps = bitmaps->next) {
    exp->nr_export_bitmaps++;
exp->export_bitmaps = g_new0(BdrvDirtyBitmap *, exp->nr_export_bitmaps);
for (i = 0, bitmaps = arg->bitmaps; bitmaps;
     i++, bitmaps = bitmaps->next)
switch (bitmaps->value->type) {
    bitmap = bitmaps->value->u.local;
    ret = -ENOENT;
    bitmaps->value->u.local);
    ret = -EINVAL;
    bitmap = bitmaps->value->u.external.name;
    bm = block_dirty_bitmap_lookup(bitmaps->value->u.external.node,
    ret = -ENOENT;
    ret = -EINVAL;
exp->export_bitmaps[i] = bm;

/* Mark bitmaps busy in a separate loop, to simplify roll-back concerns. */
for (i = 0; i < exp->nr_export_bitmaps; i++) {
    bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], true);
exp->allocation_depth = arg->allocation_depth;
g_free(exp->export_bitmaps);
g_free(exp->name);
g_free(exp->description);
/* nbd_export_find() */
if (strcmp(name, exp->name) == 0) {

/* nbd_export_aio_context() */
return exp->common.ctx;

/* nbd_export_request_shutdown() */
blk_exp_ref(&exp->common);
QTAILQ_FOREACH_SAFE(client, &exp->clients, next, next) {
if (exp->name) {
    g_free(exp->name);
    exp->name = NULL;
blk_exp_unref(&exp->common);
/* nbd_export_delete() */
assert(exp->name == NULL);
assert(QTAILQ_EMPTY(&exp->clients));
g_free(exp->description);
exp->description = NULL;
if (exp->eject_notifier_blk) {
    notifier_remove(&exp->eject_notifier);
    blk_unref(exp->eject_notifier_blk);
blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached,
blk_set_disable_request_queuing(exp->common.blk, false);
for (i = 0; i < exp->nr_export_bitmaps; i++) {
    bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false);
/* nbd_co_send_iov() */
qemu_co_mutex_lock(&client->send_lock);
client->send_coroutine = qemu_coroutine_self();
ret = qio_channel_writev_all(client->ioc, iov, niov, errp) < 0 ? -EIO : 0;
client->send_coroutine = NULL;
qemu_co_mutex_unlock(&client->send_lock);
/* set_be_simple_reply() */
stl_be_p(&reply->magic, NBD_SIMPLE_REPLY_MAGIC);
stl_be_p(&reply->error, error);
stq_be_p(&reply->cookie, cookie);
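
/*
 * Sketch (not from server.c): the three stores above produce the 16-byte
 * simple reply header; a hypothetical mirror per the NBD spec:
 */
#include <stdint.h>

struct nbd_simple_reply {
    uint32_t magic;   /* NBD_SIMPLE_REPLY_MAGIC, 0x67446698 */
    uint32_t error;   /* 0 on success, an NBD_E* errno value otherwise */
    uint64_t cookie;  /* echoed from the matching request */
} __attribute__((packed));

_Static_assert(sizeof(struct nbd_simple_reply) == 16, "wire size");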
/* nbd_co_send_simple_reply() */
assert(client->mode < NBD_MODE_STRUCTURED ||
       (client->mode == NBD_MODE_STRUCTURED &&
        request->type != NBD_CMD_READ));
trace_nbd_co_send_simple_reply(request->cookie, nbd_err,
set_be_simple_reply(&reply, nbd_err, request->cookie);
/* set_be_chunk() */
if (client->mode >= NBD_MODE_EXTENDED) {
    NBDExtendedReplyChunk *chunk = iov->iov_base;
    stl_be_p(&chunk->magic, NBD_EXTENDED_REPLY_MAGIC);
    stw_be_p(&chunk->flags, flags);
    stw_be_p(&chunk->type, type);
    stq_be_p(&chunk->cookie, request->cookie);
    stq_be_p(&chunk->offset, request->from);
    stq_be_p(&chunk->length, length);
} else {
    NBDStructuredReplyChunk *chunk = iov->iov_base;
    stl_be_p(&chunk->magic, NBD_STRUCTURED_REPLY_MAGIC);
    stw_be_p(&chunk->flags, flags);
    stw_be_p(&chunk->type, type);
    stq_be_p(&chunk->cookie, request->cookie);
    stl_be_p(&chunk->length, length);
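
/*
 * Sketch (not from server.c): the two branches above serialize the two
 * chunk header flavors. Hypothetical mirrors, per the NBD spec and its
 * extended-headers extension:
 */
#include <stdint.h>

struct nbd_structured_chunk {     /* structured replies: 20 bytes */
    uint32_t magic;               /* 0x668e33ef */
    uint16_t flags;               /* NBD_REPLY_FLAG_DONE on the last chunk */
    uint16_t type;                /* NBD_REPLY_TYPE_* */
    uint64_t cookie;
    uint32_t length;              /* payload bytes following the header */
} __attribute__((packed));

struct nbd_extended_chunk {       /* extended headers: 32 bytes */
    uint32_t magic;               /* 0x6e8a278c */
    uint16_t flags;
    uint16_t type;
    uint64_t cookie;
    uint64_t offset;              /* echoed request offset */
    uint64_t length;              /* widened payload length */
} __attribute__((packed));

_Static_assert(sizeof(struct nbd_structured_chunk) == 20, "wire size");
_Static_assert(sizeof(struct nbd_extended_chunk) == 32, "wire size");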
/* nbd_co_send_chunk_done() */
trace_nbd_co_send_chunk_done(request->cookie);

/* nbd_co_send_chunk_read() */
trace_nbd_co_send_chunk_read(request->cookie, offset, data, size);

/* nbd_co_send_chunk_error() */
trace_nbd_co_send_chunk_error(request->cookie, nbd_err,
/* nbd_co_send_sparse_read(): do a sparse read and send the structured reply
 * to the client. Returns -errno if sending fails. blk_co_block_status_above()
 * failure is reported to the client, not to the caller. */
NBDExport *exp = client->exp;
int status = blk_co_block_status_above(exp->common.blk, NULL,
                                       size - progress, &pnum, NULL,
strerror(-status));
ret = nbd_co_send_chunk_error(client, request, -status, msg, errp);
assert(pnum && pnum <= size - progress);
trace_nbd_co_send_chunk_read_hole(request->cookie,
ret = blk_co_pread(exp->common.blk, offset + progress, pnum,
error_setg_errno(errp, -ret, "reading from file failed");
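
/*
 * Sketch (not from server.c): for the hole branch above, no data is sent;
 * the chunk payload is just an offset/length pair telling the client those
 * bytes read as zeroes. Hypothetical mirror of NBD_REPLY_TYPE_OFFSET_HOLE
 * (structured mode; extended mode uses a 64-bit hole-size variant):
 */
#include <stdint.h>

struct nbd_reply_offset_hole {
    uint64_t offset;     /* absolute offset of the hole */
    uint32_t hole_size;  /* bytes the client should treat as zero */
} __attribute__((packed));

_Static_assert(sizeof(struct nbd_reply_offset_hole) == 12, "wire size");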
/* nbd_extent_array_new() */
ea->nb_alloc = nb_alloc;
ea->extents = g_new(NBDExtent64, nb_alloc);
ea->extended = mode >= NBD_MODE_EXTENDED;
ea->can_add = true;

/* nbd_extent_array_free() */
g_free(ea->extents);
/* nbd_extent_array_convert_to_be() */
assert(!ea->converted_to_be);
assert(ea->extended);
ea->can_add = false;
ea->converted_to_be = true;
for (i = 0; i < ea->count; i++) {
    ea->extents[i].length = cpu_to_be64(ea->extents[i].length);
    ea->extents[i].flags = cpu_to_be64(ea->extents[i].flags);

/* nbd_extent_array_convert_to_narrow() */
NBDExtent32 *extents = g_new(NBDExtent32, ea->count);
assert(!ea->converted_to_be);
assert(!ea->extended);
ea->can_add = false;
ea->converted_to_be = true;
for (i = 0; i < ea->count; i++) {
    assert((ea->extents[i].length | ea->extents[i].flags) <= UINT32_MAX);
    extents[i].length = cpu_to_be32(ea->extents[i].length);
    extents[i].flags = cpu_to_be32(ea->extents[i].flags);
/* nbd_extent_array_add(): add an extent to the array; if it can't be added,
 * return -1.
 * For safety, when returning -1 for the first time, .can_add is set to false,
 * and further calls to nbd_extent_array_add() will crash. */
assert(ea->can_add);
if (!ea->extended) {
if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) {
    uint64_t sum = length + ea->extents[ea->count - 1].length;
    /*
     * sum cannot overflow: the block layer bounds image size at
     * 2^63, and ea->extents[].length comes from the block layer.
     */
    if (sum <= UINT32_MAX || ea->extended) {
        ea->extents[ea->count - 1].length = sum;
        ea->total_length += length;
if (ea->count >= ea->nb_alloc) {
    ea->can_add = false;
    return -1;
ea->total_length += length;
ea->extents[ea->count] = (NBDExtent64) {.length = length, .flags = flags};
ea->count++;
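
/*
 * Sketch (not from server.c): condensed version of the merge-or-append
 * rule above, with simplified types standing in for NBDExtentArray:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct extent {
    uint64_t length;
    uint64_t flags;
};

/* Append (length, flags), merging with the tail when flags match.
 * Returns false when the fixed-size array is full. */
static bool extent_add(struct extent *ext, size_t *count, size_t nb_alloc,
                       bool extended, uint64_t length, uint64_t flags)
{
    if (*count > 0 && flags == ext[*count - 1].flags) {
        uint64_t sum = length + ext[*count - 1].length;

        /* narrow (32-bit) clients must never see an extent above 4G - 1 */
        if (extended || sum <= UINT32_MAX) {
            ext[*count - 1].length = sum;
            return true;
        }
    }
    if (*count >= nb_alloc) {
        return false;
    }
    ext[(*count)++] = (struct extent){ .length = length, .flags = flags };
    return true;
}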
/* blockstatus_to_extents() */
bytes -= num;

/* blockalloc_to_extents() */
bytes -= num;
/* nbd_co_send_extents() */
if (client->mode >= NBD_MODE_EXTENDED) {
    stl_be_p(&meta_ext.count, ea->count);
    iov[2].iov_base = ea->extents;
    iov[2].iov_len = ea->count * sizeof(ea->extents[0]);
} else {
    iov[2].iov_len = ea->count * sizeof(extents[0]);
}
trace_nbd_co_send_extents(request->cookie, ea->count, context_id,
                          ea->total_length, last);
/* nbd_co_send_block_status() */
nbd_extent_array_new(nb_extents, client->mode);
return nbd_co_send_chunk_error(client, request, -ret,

/* bitmap_to_extents() */
int64_t bound = es->extended ? INT64_MAX : INT32_MAX;
if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) ||
(void) nbd_extent_array_add(es, end - start, 0);

/* nbd_co_send_bitmap() */
nbd_extent_array_new(nb_extents, client->mode);
/* nbd_co_block_status_payload_read(): on a client compliance error, succeed
 * with request->len and request->contexts->count set to 0 (which will
 * result in an invalid-request reply to the client later on). */
uint64_t payload_len = request->len;
request->len, NBD_MAX_BUFFER_SIZE);
return -EINVAL;
assert(client->contexts.exp == client->exp);
nr_bitmaps = client->exp->nr_export_bitmaps;
request->contexts = g_new0(NBDMetaContexts, 1);
request->contexts->exp = client->exp;
sizeof(id) * client->contexts.count)) {
if (nbd_read(client->ioc, buf, payload_len,
    return -EIO;
trace_nbd_co_receive_request_payload_received(request->cookie,
request->contexts->bitmaps = g_new0(bool, nr_bitmaps);
count = (payload_len - sizeof(NBDBlockStatusPayload)) / sizeof(id);
if (!client->contexts.base_allocation ||
    request->contexts->base_allocation) {
request->contexts->base_allocation = true;
if (!client->contexts.allocation_depth ||
    request->contexts->allocation_depth) {
request->contexts->allocation_depth = true;
unsigned idx = id - NBD_META_ID_DIRTY_BITMAP;
if (idx >= nr_bitmaps || !client->contexts.bitmaps[idx] ||
    request->contexts->bitmaps[idx]) {
request->contexts->bitmaps[idx] = true;
request->len = ldq_be_p(buf);
request->contexts->count = count;
trace_nbd_co_receive_block_status_payload_compliance(request->from,
                                                     request->len);
request->len = request->contexts->count = 0;
return nbd_drop(client->ioc, payload_len, errp);
/* nbd_co_receive_request():
 * Collect a client request. Return 0 if request looks valid, -EIO to drop
 * connection right away, -EAGAIN to indicate we were interrupted and the
 * channel should be quiesced, and any other negative value to report an
 * error to the client (although the caller may still need to disconnect
 * after reporting the error). */
NBDClient *client = req->client;
trace_nbd_co_receive_request_decode_type(request->cookie, request->type,
                                         nbd_cmd_lookup(request->type));
extended_with_payload = client->mode >= NBD_MODE_EXTENDED &&
    request->flags & NBD_CMD_FLAG_PAYLOAD_LEN;
payload_len = request->len;
switch (request->type) {
req->complete = true;
return -EIO;
if (client->mode >= NBD_MODE_STRUCTURED) {
if (client->mode >= NBD_MODE_EXTENDED) {
    trace_nbd_co_receive_ext_payload_compliance(request->from,
                                                request->len);
payload_len = request->len;
request->contexts = &client->contexts;
req->complete = true;
if (check_length && request->len > NBD_MAX_BUFFER_SIZE) {
    request->len, NBD_MAX_BUFFER_SIZE);
    return -EINVAL;
assert(request->type != NBD_CMD_WRITE);
request->len = 0;
req->data = blk_try_blockalign(client->exp->common.blk,
                               request->len);
if (req->data == NULL) {
    return -ENOMEM;
assert(req->data);
ret = nbd_read(client->ioc, req->data, payload_len,
ret = nbd_drop(client->ioc, payload_len, errp);
return -EIO;
req->complete = true;
trace_nbd_co_receive_request_payload_received(request->cookie,
if (client->exp->nbdflags & NBD_FLAG_READ_ONLY && check_rofs) {
    error_setg(errp, "Export is read-only");
    return -EROFS;
if (request->from > client->exp->size ||
    request->len > client->exp->size - request->from) {
    ", Size: %" PRIu64, request->from, request->len,
    client->exp->size);
    return (request->type == NBD_CMD_WRITE ||
            request->type == NBD_CMD_WRITE_ZEROES) ? -ENOSPC : -EINVAL;
if (client->check_align && !QEMU_IS_ALIGNED(request->from | request->len,
                                            client->check_align)) {
    /*
     * The block layer gracefully handles unaligned requests, but
     * it's still worth tracing client non-compliance
     */
    trace_nbd_co_receive_align_compliance(nbd_cmd_lookup(request->type),
                                          request->from,
                                          request->len,
                                          client->check_align);
if (request->flags & ~valid_flags) {
    nbd_cmd_lookup(request->type), request->flags);
    return -EINVAL;
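
/*
 * Sketch (not from server.c): the bounds test above is written to avoid
 * integer overflow; comparing len against (size - from) never computes
 * from + len, which could wrap for hostile 64-bit values:
 */
#include <stdbool.h>
#include <stdint.h>

static bool request_in_bounds(uint64_t from, uint64_t len, uint64_t size)
{
    return from <= size && len <= size - from;
}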
/* nbd_send_generic_reply(): Returns 0 if connection is still live,
 * -errno on failure to talk to client. */
if (client->mode >= NBD_MODE_STRUCTURED && ret < 0) {
    return nbd_co_send_chunk_error(client, request, -ret, error_msg, errp);
} else if (client->mode >= NBD_MODE_EXTENDED) {
return nbd_co_send_simple_reply(client, request, ret < 0 ? -ret : 0,
/* nbd_do_cmd_read(): Return -errno if sending fails. Other errors are
 * reported directly to the client. */
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_READ);
assert(request->len <= NBD_MAX_BUFFER_SIZE);
if (request->flags & NBD_CMD_FLAG_FUA) {
    ret = blk_co_flush(exp->common.blk);
if (client->mode >= NBD_MODE_STRUCTURED &&
    !(request->flags & NBD_CMD_FLAG_DF) && request->len)
{
    return nbd_co_send_sparse_read(client, request, request->from,
                                   data, request->len, errp);
ret = blk_co_pread(exp->common.blk, request->from, request->len, data, 0);
if (client->mode >= NBD_MODE_STRUCTURED) {
    if (request->len) {
        return nbd_co_send_chunk_read(client, request, request->from, data,
                                      request->len, true, errp);
data, request->len, errp);
/* nbd_do_cmd_cache(): Return -errno if sending fails. Other errors are
 * reported directly to the client. */
NBDExport *exp = client->exp;
assert(request->type == NBD_CMD_CACHE);
assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_preadv(exp->common.blk, request->from, request->len,
/* nbd_handle_request(): Return -errno if sending fails. Other errors are
 * reported directly to the client. */
NBDExport *exp = client->exp;
inactive = bdrv_is_inactive(blk_bs(exp->common.blk));
switch (request->type) {
return nbd_send_generic_reply(client, request, -EPERM,
switch (request->type) {
if (request->flags & NBD_CMD_FLAG_FUA) {
assert(request->len <= NBD_MAX_BUFFER_SIZE);
ret = blk_co_pwrite(exp->common.blk, request->from, request->len, data,
if (request->flags & NBD_CMD_FLAG_FUA) {
if (!(request->flags & NBD_CMD_FLAG_NO_HOLE)) {
if (request->flags & NBD_CMD_FLAG_FAST_ZERO) {
ret = blk_co_pwrite_zeroes(exp->common.blk, request->from, request->len,
ret = blk_co_flush(exp->common.blk);
ret = blk_co_pdiscard(exp->common.blk, request->from, request->len);
if (ret >= 0 && request->flags & NBD_CMD_FLAG_FUA) {
    ret = blk_co_flush(exp->common.blk);
assert(request->contexts);
assert(client->mode >= NBD_MODE_EXTENDED ||
       request->len <= UINT32_MAX);
if (request->contexts->count) {
    bool dont_fragment = request->flags & NBD_CMD_FLAG_REQ_ONE;
    int contexts_remaining = request->contexts->count;
    if (!request->len) {
        return nbd_send_generic_reply(client, request, -EINVAL,
                                      "need non-zero length", errp);
    if (request->contexts->base_allocation) {
        exp->common.blk,
        request->from,
        request->len, dont_fragment,
        !--contexts_remaining,
    if (request->contexts->allocation_depth) {
        exp->common.blk,
        request->from, request->len,
        !--contexts_remaining,
    assert(request->contexts->exp == client->exp);
    for (i = 0; i < client->exp->nr_export_bitmaps; i++) {
        if (!request->contexts->bitmaps[i]) {
        client->exp->export_bitmaps[i],
        request->from, request->len,
        dont_fragment, !--contexts_remaining,
} else if (client->contexts.count) {
    return nbd_send_generic_reply(client, request, -EINVAL,
return nbd_send_generic_reply(client, request, -EINVAL,
request->type);
ret = nbd_send_generic_reply(client, request, -EINVAL, msg,
/* nbd_trip() */
NBDClient *client = req->client;
qemu_mutex_lock(&client->lock);
if (client->closing) {
if (client->quiescing) {
    client->recv_coroutine = NULL;
/*
 * nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has
 * set client->quiescing but by the time we get back nbd_drained_end() may
 * have already cleared client->quiescing. In that case we try again,
 * since nothing else will spawn a new receive coroutine before we set
 * client->recv_coroutine = NULL further down.
 */
assert(client->recv_coroutine == qemu_coroutine_self());
qemu_mutex_unlock(&client->lock);
qemu_mutex_lock(&client->lock);
} while (ret == -EAGAIN && !client->quiescing);
client->recv_coroutine = NULL;
if (client->closing) {
if (ret == -EAGAIN) {
if (ret == -EIO) {
qemu_mutex_unlock(&client->lock);
qio_channel_set_cork(client->ioc, true);
/* It wasn't -EIO, so, according to nbd_co_receive_request()
 * semantics, we should return the error to the client. */
ret = nbd_send_generic_reply(client, &request, -EINVAL,
ret = nbd_handle_request(client, &request, req->data, &local_err);
if (request.contexts && request.contexts != &client->contexts) {
    g_free(request.contexts->bitmaps);
qio_channel_set_cork(client->ioc, false);
qemu_mutex_lock(&client->lock);
if (!req->complete) {
qemu_mutex_unlock(&client->lock);
qemu_mutex_unlock(&client->lock);
/* nbd_client_receive_next_request(): caller must hold client->lock. */
if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS &&
    !client->quiescing) {
    client->recv_coroutine = qemu_coroutine_create(nbd_trip, req);
    aio_co_schedule(client->exp->common.ctx, client->recv_coroutine);
/* nbd_co_client_start() */
qemu_co_mutex_init(&client->send_lock);
if (client->handshake_max_secs > 0) {
    client->sioc);
    client->handshake_max_secs * NANOSECONDS_PER_SECOND);
WITH_QEMU_LOCK_GUARD(&client->lock) {
/* nbd_client_new() */
qemu_mutex_init(&client->lock);
client->refcount = 1;
client->tlscreds = tlscreds;
object_ref(OBJECT(client->tlscreds));
client->tlsauthz = g_strdup(tlsauthz);
client->handshake_max_secs = handshake_max_secs;
client->sioc = sioc;
object_ref(OBJECT(client->sioc));
client->ioc = QIO_CHANNEL(sioc);
object_ref(OBJECT(client->ioc));
client->close_fn = close_fn;
client->owner = owner;

/* nbd_client_owner() */
return client->owner;