Lines matching "closed", "-", "loop"
#include "qemu/main-loop.h"
#include "qapi/qapi-visit-sockets.h"
#include "qapi/clone-visitor.h"

#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
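
The inverse mapping is used further down (nbd_co_send_request() sets request->cookie = INDEX_TO_COOKIE(i)); a minimal sketch of the pair, assuming the 1-based mapping that COOKIE_TO_INDEX implies:

    /* Sketch: cookies are 1-based so that a cookie of 0 can mean
     * "no parked reply" (see nbd_receive_replies below). */
    #define INDEX_TO_COOKIE(index)  ((index) + 1)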

In nbd_clear_bdrvstate():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
    ...
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;

/* Called with s->receive_mutex taken. */
In nbd_recv_coroutine_wake_one():
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);

In nbd_recv_coroutines_wake():
    QEMU_LOCK_GUARD(&s->receive_mutex);
    ...
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
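
A sketch of the loop these two lines belong to, assuming MAX_NBD_REQUESTS bounds the s->requests[] array as it does elsewhere in this listing:

    static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
    {
        int i;

        QEMU_LOCK_GUARD(&s->receive_mutex);
        for (i = 0; i < MAX_NBD_REQUESTS; i++) {
            if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
                return; /* waking one receiver is enough; it passes the role on */
            }
        }
    }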

/* Called with s->requests_lock held. */
In nbd_channel_error_locked():
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }
    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }

In nbd_channel_error():
    QEMU_LOCK_GUARD(&s->requests_lock);
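
A plausible shape for the wrapper, assuming it only adds locking around the _locked variant shown above:

    static void nbd_channel_error(BDRVNBDState *s, int ret)
    {
        QEMU_LOCK_GUARD(&s->requests_lock);
        nbd_channel_error_locked(s, ret);
    }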

In reconnect_delay_timer_del():
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }

In reconnect_delay_timer_cb():
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);

In reconnect_delay_timer_init():
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             ...
    timer_mod(s->reconnect_delay_timer, expire_time_ns);

In nbd_teardown_connection():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }

In open_timer_del():
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }

In open_timer_cb():
    nbd_co_establish_connection_cancel(s->conn);
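
A sketch of the whole callback, assuming it mirrors reconnect_delay_timer_cb() in also releasing its timer via open_timer_del():

    static void open_timer_cb(void *opaque)
    {
        BDRVNBDState *s = opaque;

        nbd_co_establish_connection_cancel(s->conn);
        open_timer_del(s);
    }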

In open_timer_init():
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  ...
    timer_mod(s->open_timer, expire_time_ns);

In nbd_client_will_reconnect():
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;

In nbd_handle_updated_info():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

In nbd_co_do_establish_connection():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           ...);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 ...);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
        ...
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

/* Called with s->requests_lock held. */
In nbd_client_connecting():
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;

/* Called with s->requests_lock taken. */
In nbd_reconnect_attempt():
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;
    ...
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        ...
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
            qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
            s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    if (s->ioc) {
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

In nbd_receive_replies():
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.cookie == cookie) {
            ...
        }

        if (s->reply.cookie != 0) {
            /*
             * ... woken by whoever set s->reply.cookie (or never wait in this
             * place).
             */
            ind2 = COOKIE_TO_INDEX(s->reply.cookie);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);
            ...
            /*
             * ... finished and s->reply.cookie set to 0.
             */
            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            ...
        }

        assert(s->reply.cookie == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, s->info.mode, errp);
        ...
            ret = -EIO;
        ...
        if (nbd_reply_is_structured(&s->reply) &&
            s->info.mode < NBD_MODE_STRUCTURED) {
            nbd_channel_error(s, -EINVAL);
            ...
            return -EINVAL;
        }
        ind2 = COOKIE_TO_INDEX(s->reply.cookie);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            ...
            return -EINVAL;
        }
        if (s->reply.cookie == cookie) {
            ...
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
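
Taken together, these pieces implement a single-reader handoff: only the coroutine holding receive_mutex reads the socket. When the parked reply in s->reply belongs to another request, the reader marks itself receiving, drops the mutex and yields; the owner of the parked reply later clears s->reply.cookie back to 0 and passes the reader role on via nbd_recv_coroutine_wake_one()/nbd_recv_coroutines_wake().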

In nbd_co_send_request():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        ...
        qemu_co_queue_restart_all(&s->free_sema);
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            ...
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->cookie = INDEX_TO_COOKIE(i);
    request->mode = s->info.mode;

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    ...
    qemu_mutex_lock(&s->requests_lock);
    ...
    if (i != -1) {
        s->requests[i].coroutine = NULL;
    }
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

In payload_advance16():
    return lduw_be_p(*payload - 2);
In payload_advance32():
    return ldl_be_p(*payload - 4);
In payload_advance64():
    return ldq_be_p(*payload - 8);
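
The three returns suggest a common helper shape: advance the cursor first, then load big-endian from just behind it. A sketch of the 16-bit variant under that assumption:

    static inline uint16_t payload_advance16(uint8_t **payload)
    {
        *payload += 2;                 /* move past the field */
        return lduw_be_p(*payload - 2); /* load it big-endian */
    }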

In nbd_parse_offset_hole_payload():
    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        ...
        return -EINVAL;
    }
    ...
    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        ...
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        ...
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

In nbd_parse_blockstatus_payload():
    if (chunk->length < pay_len) {
        ...
        return -EINVAL;
    }
    ...
    if (s->info.context_id != context_id) {
        error_setg(errp, ...
                   s->info.context_id);
        return -EINVAL;
    }

    if (wide) {
        ...
        extent->length = payload_advance64(&payload);
        extent->flags = payload_advance64(&payload);
    } else {
        ...
        extent->length = payload_advance32(&payload);
        extent->flags = payload_advance32(&payload);
    }

    if (extent->length == 0) {
        ...
        return -EINVAL;
    }

    /*
     * ... protocol, but as qemu-nbd 3.1 is such a server (at least for
     * ... rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * ... up to the full block and change the status to fully-allocated
     * ...
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        ...
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }
    ...
    if (count != wide || chunk->length > pay_len) {
        ...
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        ...
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * ...
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

In nbd_parse_error_payload():
    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        ...
        return -EINVAL;
    }
    ...
        return -EINVAL;
    ...
    *request_ret = -error;
    ...
    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        ...
        return -EINVAL;
    }

In nbd_co_receive_offset_data_payload():
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    if (chunk->length <= sizeof(offset)) {
        ...
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    ...
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        ...
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        ...
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    ...
    return ret < 0 ? -EIO : 0;

In nbd_co_receive_structured_payload():
    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;
    ...
        return -EINVAL;
    ...
        return -EINVAL;
    ...
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);

In nbd_co_do_receive_one_chunk():
        error_prepend(errp, "Connection closed: ");
        return -EIO;
    ...
    assert(s->ioc);

    assert(s->reply.cookie == cookie);

    if (nbd_reply_is_simple(&s->reply)) {
        ...
            return -EINVAL;
        ...
        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        ...
        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    assert(s->info.mode >= NBD_MODE_STRUCTURED);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            ...
            return -EINVAL;
        }
        if (chunk->length) {
            ...
            return -EINVAL;
        }
        ...
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        ...
            return -EINVAL;
        ...
        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ...
    }
    ...
    if (nbd_reply_type_is_error(chunk->type)) {
        ...
    }

/*
 * Read reply, wake up connection_co and set s->quit if needed.
 */
In nbd_co_receive_one_chunk():
    ...
        /* For assert at loop start in nbd_connection_entry */
        *reply = s->reply;
    ...
    s->reply.cookie = 0;

In nbd_iter_channel_error():
    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    }

In nbd_iter_request_error():
    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
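
From the fields touched here and in nbd_iter_channel_error() above, the iterator state presumably looks something like:

    typedef struct NBDReplyChunkIter {
        int ret;             /* first fatal channel error, 0 if none */
        int request_ret;     /* first per-request error from the server */
        Error *err;
        bool done;           /* NBD_REPLY_FLAG_DONE seen; stop iterating */
        bool only_structured;
    } NBDReplyChunkIter;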

In nbd_reply_chunk_iter_receive():
    if (iter->done) {
        ...
    }
    ...
    ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
                                   ...);
    ...
    if (nbd_reply_is_simple(reply) || iter->ret < 0) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    qemu_mutex_lock(&s->requests_lock);
    s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_mutex_unlock(&s->requests_lock);

    return false;
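
The "Execute the loop body" comment and the true/false protocol suggest a for-macro driver around this function; a sketch, assuming this is how callers such as nbd_co_receive_cmdread_reply() iterate over reply chunks:

    #define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, qiov, \
                                    reply, payload) \
        for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
             nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, \
                                          payload);)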

In nbd_co_receive_cmdread_reply():
    NBD_FOREACH_REPLY_CHUNK(s, iter, cookie,
                            s->info.mode >= NBD_MODE_STRUCTURED,
                            qiov, &reply, &payload)
    {
        ...
        switch (chunk->type) {
        ...
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, ...
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }
        ...
    }

In nbd_co_receive_blockstatus_reply():
    assert(!extent->length);
    ...
        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
        case NBD_REPLY_TYPE_BLOCK_STATUS_EXT:
            wide = chunk->type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT;
            if ((s->info.mode >= NBD_MODE_EXTENDED) != wide) {
                ...
                nbd_channel_error(s, -EINVAL);
                ...
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            ...
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, ...
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }
    ...
    if (!extent->length && !iter.request_ret) {
        ...
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

In nbd_co_request():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ...
        ret = nbd_co_receive_return_code(s, request->cookie,
                                         ...);
        ...
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->cookie, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ...);

In nbd_client_co_preadv():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    /*
     * ...
     * byte-accurate sizing yet - if the read exceeds the server's
     * ...
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        ...
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;
        ...
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }
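
A worked example with assumed numbers: if the server advertises a size of 1000 bytes and the block layer issues a sector-rounded read of 512 bytes at offset 512, then slop = 512 + 512 - 1000 = 24, the final 24 bytes of the qiov are zeroed, and request.len shrinks to 488 before the request goes to the server.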

In nbd_client_co_pwritev():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    ...
        assert(s->info.flags & NBD_FLAG_SEND_FUA);

In nbd_client_co_pwrite_zeroes():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }
    ...
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
    ...
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);

In nbd_client_co_flush():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        ...
    }

In nbd_client_co_pdiscard():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
    assert(bytes <= UINT32_MAX || s->info.mode >= NBD_MODE_EXTENDED);

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        ...
    }

In nbd_client_co_block_status():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
        .len = MIN(bytes, s->info.size - offset),
    ...
    if (!s->info.base_allocation) {
        ...
    }
    ...
    if (s->info.mode < NBD_MODE_EXTENDED) {
        request.len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                          request.len);
    }

    /*
     * ...
     * byte-accurate sizing yet - if the status request exceeds the
     * ...
     */
    if (offset >= s->info.size) {
        ...
    }
    ...
    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }

In nbd_client_reopen_prepare():
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }

In nbd_yank():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    QEMU_LOCK_GUARD(&s->requests_lock);
    qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    s->state = NBD_CLIENT_QUIT;

In nbd_client_close():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC, .mode = s->info.mode };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

In nbd_parse_uri():
        return -EINVAL;
    ...
        return -EINVAL;
    ...
    qp = g_uri_parse_params(uri_query, -1, "&", G_URI_PARAMS_NONE, NULL);
    ...
        return -EINVAL;
    ...
        return -EINVAL;
    ...
    if (uri_server || uri_port != -1 || !uri_socket) {
        return -EINVAL;
    }
    ...
        return -EINVAL;
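
For reference, the URI shapes this parser validates match the pseudo-filenames rebuilt in nbd_refresh_filename() later in this listing, along the lines of:

    nbd://host[:port]/[export]
    nbd+unix:///[export]?socket=/path/to/socket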

In nbd_has_filename_options_conflict():
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, ...
                       e->key);
            ...
        }

In nbd_parse_filename():
    /* extract the host_spec - fail if it's not nbd:... */
    ...
    qdict_put_str(options, "server.host", addr->host);
    qdict_put_str(options, "server.port", addr->port);

In nbd_process_legacy_socket_options():
    if (strstart(e->key, "server.", NULL)) {

    {
        .name = "tls-creds",
        ...
    },
    {
        .name = "tls-hostname",
        ...
    },
    {
        .name = "x-dirty-bitmap",
        ...
    },
    {
        .name = "reconnect-delay",
        .help = "... error. During the first @reconnect-delay seconds, all "
                "...",
    },
    {
        .name = "open-timeout",
        .help = "... If non-zero, the nbd driver will repeat connection "
                "attempts until successful or until @open-timeout seconds "
                "...",
    },

In nbd_process_options():
    BDRVNBDState *s = bs->opaque;
    int ret = -EINVAL;
    ...
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        ...
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        ...
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            ...
        }

        s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
        if (!s->tlshostname &&
            s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
            s->tlshostname = g_strdup(s->saddr->u.inet.host);
        }
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        ...
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);
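
A hypothetical invocation wiring these options together (option names as declared in the option table above; the node name, host, port, and export values are placeholders):

    qemu-system-x86_64 -blockdev '{ "driver": "nbd", "node-name": "nbd0",
        "server": { "type": "inet", "host": "127.0.0.1", "port": "10809" },
        "export": "backup", "reconnect-delay": 10, "open-timeout": 30 }'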

In nbd_open():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_mutex_init(&s->requests_lock);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }
    ...
    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds,
                                        s->tlshostname);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ...
    /*
     * ...
     * is drained or closed.
     */
    ...
    nbd_client_connection_enable_retry(s->conn);

In nbd_refresh_limits():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * ...
     * - a size that is not sector-aligned implies that an alignment
     *   ...
     * - advertisement of block status requires an alignment of 1, so
     *   ... the server will report sub-sector extents, such as a hole at EOF
     *   ...
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;
    ...
    if (s->info.mode >= NBD_MODE_EXTENDED) {
        bs->bl.max_pdiscard = bs->bl.max_pwrite_zeroes = 0;
    }

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }

In nbd_co_truncate():
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        ...
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        ...
        return -EINVAL;
    }

In nbd_co_getlength():
    BDRVNBDState *s = bs->opaque;

    return s->info.size;

In nbd_refresh_filename():
    BDRVNBDState *s = bs->opaque;
    ...
    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       ...);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       ...);
    }
    if (len >= sizeof(bs->exact_filename)) {
        ...
        bs->exact_filename[0] = '\0';
    }
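
Substituting assumed values into the format strings above yields pseudo-filenames such as:

    nbd+unix:///backup?socket=/tmp/nbd.sock
    nbd://127.0.0.1:10809/backup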
2092 "tls-creds",
2093 "tls-hostname",

In nbd_cancel_in_flight():
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    ...
    qemu_mutex_lock(&s->requests_lock);
    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    qemu_mutex_unlock(&s->requests_lock);

    nbd_co_establish_connection_cancel(s->conn);

In nbd_attach_aio_context():
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    ...
    assert(!s->reconnect_delay_timer);

In nbd_detach_aio_context():
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);