QEMU block/blkio.c: libblkio BlockDriver (excerpts)
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "system/block-backend.h"
#include "block/block-io.h"
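/*
 * This file implements QEMU BlockDrivers backed by libblkio
 * (https://gitlab.com/libblkio/libblkio). One BlockDriver is registered per
 * libblkio driver: "io_uring", "nvme-io_uring", "virtio-blk-vfio-pci",
 * "virtio-blk-vhost-user" and "virtio-blk-vhost-vdpa".
 */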
/* Per-BlockDriverState state (BDRVBlkioState), excerpted fields: */

    /*
     * libblkio is not thread-safe so this lock protects ->blkio and
     * ->blkioq.
     */
    QemuMutex blkio_lock;

    struct blkioq *blkioq; /* make this multi-queue in the future... */

    /*
     * Protects ->bounce_pool, ->bounce_bufs, ->bounce_available.
     *
     * Lock ordering: ->bounce_lock before ->blkio_lock.
     */
    CoMutex bounce_lock;

    /* The value of the "mem-region-alignment" property */
    uint64_t mem_region_alignment;

    /* Are madvise(MADV_DONTNEED)-style operations unavailable? */
    bool may_pin_mem_regions;
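/*
 * Other fields used below (types inferred from usage, not the full
 * definition): struct blkio *blkio; int completion_fd; struct
 * blkio_completion poll_completion; struct blkio_mem_region bounce_pool;
 * QLIST_HEAD(, BlkioBounceBuf) bounce_bufs; CoQueue bounce_available;
 * bool needs_mem_regions; bool needs_mem_region_fd.
 */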
/* In blkio_resize_bounce_pool(s, bytes), called with s->bounce_lock held: */
    int ret;

    assert(QLIST_EMPTY(&s->bounce_bufs));

    /* Pad size to reduce frequency of resize calls */
    /* ... */
    bytes = QEMU_ALIGN_UP(bytes, s->mem_region_alignment);

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        if (s->bounce_pool.addr) {
            blkio_unmap_mem_region(s->blkio, &s->bounce_pool);
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
        }

        /* Automatically freed when s->blkio is destroyed */
        ret = blkio_alloc_mem_region(s->blkio, &s->bounce_pool, bytes);
        /* ... on failure, return ret ... */

        ret = blkio_map_mem_region(s->blkio, &s->bounce_pool);
        if (ret < 0) {
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
        }
    }
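/*
 * Lock-ordering note: the caller already holds ->bounce_lock and
 * ->blkio_lock is taken inside, matching the "->bounce_lock before
 * ->blkio_lock" rule documented in BDRVBlkioState.
 */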
/* In blkio_do_alloc_bounce_buffer(s, bounce, bytes), called with
 * s->bounce_lock held: */
    void *addr = s->bounce_pool.addr;
    /* ... */

    QLIST_FOREACH(cur, &s->bounce_bufs, next) {
        space = cur->buf.iov_base - addr;
        if (bytes <= space) {
            /* ... link bounce into the list here ... */
            bounce->buf.iov_base = addr;
            bounce->buf.iov_len = bytes;
            return true;
        }

        addr = cur->buf.iov_base + cur->buf.iov_len;
    }

    /* Is there space left at the end of the pool? */
    space = s->bounce_pool.addr + s->bounce_pool.len - addr;
    /* ... */
    QLIST_INSERT_HEAD(&s->bounce_bufs, bounce, next); /* empty-list case */
    bounce->buf.iov_base = addr;
    bounce->buf.iov_len = bytes;
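/*
 * This is a first-fit allocator: ->bounce_bufs is kept in address order,
 * each hole between neighbouring buffers is tried, and finally the tail
 * hole up to bounce_pool.addr + bounce_pool.len. It returns false, without
 * blocking, when no hole is large enough.
 */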
/* In blkio_alloc_bounce_buffer(s, bounce, bytes): */
    QEMU_LOCK_GUARD(&s->bounce_lock);

    /* Don't jump ahead of requests that are already waiting */
    if (!qemu_co_queue_empty(&s->bounce_available)) {
        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 /* ... */);
    }

    while (true) {
        if (blkio_do_alloc_bounce_buffer(s, bounce, bytes)) {
            /* Kick the next queued request since space may remain */
            qemu_co_queue_next(&s->bounce_available);
            return 0;
        }

        /*
         * If there are no in-flight requests then the pool was simply too
         * small, so resize it.
         */
        if (QLIST_EMPTY(&s->bounce_bufs)) {
            /* ... blkio_resize_bounce_pool(s, bytes) and retry; on error: */
            qemu_co_queue_next(&s->bounce_available);
            /* ... return the error ... */
        }

        /* Wait for an in-flight request to finish */
        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 /* ... */);
    }
/* In blkio_free_bounce_buffer(s, bounce): */
    QEMU_LOCK_GUARD(&s->bounce_lock);

    /* ... remove bounce from s->bounce_bufs ... */

    /* Wake up a waiting coroutine since space may now be available */
    qemu_co_queue_next(&s->bounce_available);
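/*
 * Request-side pairing (sketch; helper signatures as used above):
 *
 *     BlkioBounceBuf bounce;
 *     int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ...do I/O against bounce.buf...
 *     blkio_free_bounce_buffer(s, &bounce);
 *
 * Freeing wakes one coroutine queued in qemu_co_queue_wait_flags() above so
 * it can retry its allocation.
 */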
/* In blkio_completion_fd_read(opaque): */
    BDRVBlkioState *s = bs->opaque;
    uint64_t val;
    int ret;

    /* Polling may have already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        BlkioCoData *cod = s->poll_completion.user_data;
        cod->ret = s->poll_completion.ret;

        /* Clear it in case aio_co_wake() enters a nested event loop */
        s->poll_completion.user_data = NULL;

        aio_co_wake(cod->coroutine);
    }

    /* Reset the completion fd notification */
    ret = read(s->completion_fd, &val, sizeof(val));
    /* ... */

    /*
     * Reading one completion at a time makes nested event loop re-entrancy
     * simple. ...
     */
    while (true) {
        struct blkio_completion completion;

        WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
            ret = blkioq_do_io(s->blkioq, &completion, 0, 1, NULL);
        }
        if (ret != 1) {
            break;
        }

        BlkioCoData *cod = completion.user_data;
        cod->ret = completion.ret;
        aio_co_wake(cod->coroutine);
    }
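/*
 * s->completion_fd comes from blkioq_get_completion_fd() (see blkio_open()
 * below): it is an eventfd-style notification fd, so reading the 8-byte
 * counter above only resets the notification; the completions themselves
 * are fetched with blkioq_do_io().
 */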
/* In blkio_completion_fd_poll(opaque): */
    BDRVBlkioState *s = bs->opaque;
    int ret;

    /* Just in case we already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        return true;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkioq_do_io(s->blkioq, &s->poll_completion, 0, 1, NULL);
    }
    return ret == 1;
/* In blkio_attach_aio_context(bs, new_context): */
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(new_context, s->completion_fd,
                       /* ... read, poll and poll-ready handlers ... */);

/* In blkio_detach_aio_context(bs): */
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(bdrv_get_aio_context(bs), s->completion_fd, NULL, NULL,
                       NULL, NULL, NULL);
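/*
 * The elided arguments register blkio_completion_fd_read() plus the poll
 * handlers on attach and pass NULL handlers on detach, so completion
 * processing follows the BlockDriverState as it moves between AioContexts.
 */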
/* In blkio_deferred_fn(opaque): */
    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_do_io(s->blkioq, NULL, 0, 0, NULL);
    }

/* In blkio_submit_io(bs): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */
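/*
 * blkio_submit_io() does not ring the doorbell directly: it goes through
 * the qemu/defer-call.h mechanism (hence the include above), so batches of
 * requests queued by multiple coroutines result in a single
 * blkioq_do_io() call in blkio_deferred_fn().
 */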
/* In blkio_co_pdiscard(bs, offset, bytes): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_discard(s->blkioq, offset, bytes, &cod, 0);
    }
    /* ... */
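/*
 * All request functions share one completion pattern: a BlkioCoData cod
 * holding qemu_coroutine_self() is passed to the queue as user_data, the
 * coroutine calls blkio_submit_io(bs) and yields, and the completion
 * handler above fills cod.ret and wakes the coroutine, whose return value
 * is cod.ret.
 */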
/* In blkio_co_preadv(bs, offset, bytes, qiov, flags): */
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    /* ... */
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;
    /* ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_readv(s->blkioq, offset, iov, iovcnt, &cod, 0);
    }
    /* ... */
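/*
 * When use_bounce_buffer is true, the elided code allocates bounce space,
 * points iov/iovcnt at bounce.buf instead of the guest vector, and after
 * completion copies the data out into qiov before freeing the bounce
 * buffer. The write side below is the mirror image.
 */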
/* In blkio_co_pwritev(bs, offset, bytes, qiov, flags): */
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    /* ... */
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;
    /* ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_writev(s->blkioq, offset, iov, iovcnt, &cod, blkio_flags);
    }
    /* ... */
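/*
 * For writes, data is copied from qiov into the bounce buffer before
 * submission, and blkio_flags forwards BDRV_REQ_FUA as libblkio's FUA
 * request flag when the caller set it (advertised via
 * bs->supported_write_flags in blkio_open() below).
 */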
/* In blkio_co_flush(bs): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_flush(s->blkioq, &cod, 0);
    }
    /* ... */
/* In blkio_co_pwrite_zeroes(bs, offset, bytes, flags): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_write_zeroes(s->blkioq, offset, bytes, &cod, blkio_flags);
    }
    /* ... */
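/*
 * The elided prologue translates the BDRV_REQ_* zeroing flags into
 * libblkio's BLKIO_REQ_* request flags carried in blkio_flags; the
 * supported set (BDRV_REQ_MAY_UNMAP, BDRV_REQ_NO_FALLBACK) is advertised
 * via bs->supported_zero_flags in blkio_open() below.
 */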
/* In blkio_mem_region_from_host(bs, host, size, region, errp): */
    BDRVBlkioState *s = bs->opaque;
    int fd = -1;
    /* ... */

    /* Reject buffers that do not satisfy "mem-region-alignment" */
    if (((uintptr_t)host | size) % s->mem_region_alignment) {
        /* ... */
    }

    /* ... */
    if (s->needs_mem_region_fd) {
        /* ... look up the fd of the RAMBlock containing host ... */
        if (fd == -1) {
            /*
             * Ideally every RAMBlock would have an fd. pc-bios and other
             * ...
             */
        }
    }

    /* ... */
    end_block = qemu_ram_block_from_host(host + size - 1, false, &offset);
    /* ... */
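/*
 * The elided remainder verifies that [host, host + size) lies within a
 * single RAMBlock (hence the lookup of the last byte above) and fills in
 * *region with the addr, len, fd and fd_offset that describe the range to
 * libblkio.
 */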
/* In blkio_register_buf(bs, host, size, errp): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    /*
     * Mapping memory regions conflicts with RAM discard (virtio-mem) when
     * pinning is involved, so only map when the driver requires it.
     */
    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return true; /* nothing to do */
    }

    /* ... build region via blkio_mem_region_from_host() ... */

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_map_mem_region(s->blkio, &region);
    }
/* In blkio_unregister_buf(bs, host, size): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return;
    }

    /* ... */
    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkio_unmap_mem_region(s->blkio, &region);
    }
/* In blkio_io_uring_connect(bs, options, flags, errp): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    ret = blkio_set_str(s->blkio, "path", filename);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (flags & BDRV_O_NOCACHE) {
        ret = blkio_set_bool(s->blkio, "direct", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set direct: %s",
                             blkio_get_error_msg());
            return ret;
        }
    }

    ret = blkio_connect(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_connect failed: %s",
                         blkio_get_error_msg());
        return ret;
    }
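/*
 * All three connect functions follow the same libblkio control-plane
 * pattern: properties are set with blkio_set_str()/blkio_set_bool()/
 * blkio_set_int() on the not-yet-connected instance, blkio_connect()
 * transitions it to the connected state, and every call returns 0 or a
 * negative errno with a human-readable message from blkio_get_error_msg().
 */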
/* In blkio_nvme_io_uring_connect(bs, options, flags, errp): */
    BDRVBlkioState *s = bs->opaque;
    /* ... */

    if (!path) {
        /* ... error: the "path" option is required ... */
        return -EINVAL;
    }

    ret = blkio_set_str(s->blkio, "path", path);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        /* ... error: this driver requires cache.direct=on ... */
        return -EINVAL;
    }

    ret = blkio_connect(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_connect failed: %s",
                         blkio_get_error_msg());
        return ret;
    }
/* In blkio_virtio_blk_connect(bs, options, flags, errp): */
    BDRVBlkioState *s = bs->opaque;
    bool fd_supported = false;
    int fd = -1, ret;

    if (!path) {
        /* ... error: the "path" option is required ... */
        return -EINVAL;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        /* ... error: this driver requires cache.direct=on ... */
        return -EINVAL;
    }

    /* Probe whether this libblkio driver accepts an "fd" property */
    if (blkio_set_int(s->blkio, "fd", -1) == 0) {
        fd_supported = true;
    }

    if (fd_supported) {
        /*
         * `path` can contain the path of a character device
         * (e.g. /dev/vhost-vdpa-0 or /dev/vfio/vfio) or a unix socket.
         * ...
         * In order to open the device read-only, we are using the `read-only`
         * property of the libblkio driver in blkio_open().
         */
        /* ... fd = qemu_open(path, ...) ... */

        /*
         * qemu_open() can fail if `path` is not a file or device, for example
         * a Unix Domain Socket
         * for the virtio-blk-vhost-user driver. In such cases let's have
         * a second try, directly passing `path` to libblkio.
         */
        /* ... on open failure: fd_supported = false; otherwise: */
        ret = blkio_set_int(s->blkio, "fd", fd);
        if (ret < 0) {
            fd_supported = false;
            /* ... close fd ... */
            fd = -1;
        }
    }

    if (!fd_supported) {
        ret = blkio_set_str(s->blkio, "path", path);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set path: %s",
                             blkio_get_error_msg());
            return ret;
        }
    }

    ret = blkio_connect(s->blkio);
    if (ret < 0 && fd >= 0) {
        /* ... close the fd we handed to libblkio ... */
        fd = -1;
    }

    /*
     * Before https://gitlab.com/libblkio/libblkio/-/merge_requests/208,
     * setting the `fd` property is not enough to check whether the driver
     * supports it: in that case blkio_connect() will fail with -EINVAL.
     * Retry with the `path` property to cover this scenario.
     */
    if (fd_supported && ret == -EINVAL) {
        /*
         * Clear the `fd` property we set previously by setting
         * it to -1.
         */
        ret = blkio_set_int(s->blkio, "fd", -1);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set fd: %s",
                             blkio_get_error_msg());
            return ret;
        }

        ret = blkio_set_str(s->blkio, "path", path);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set path: %s",
                             blkio_get_error_msg());
            return ret;
        }

        ret = blkio_connect(s->blkio);
    }

    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_connect failed: %s",
                         blkio_get_error_msg());
        return ret;
    }
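/*
 * Summary of the fallback dance above: when the libblkio driver accepts an
 * "fd" property, QEMU opens the device itself and passes the fd (so it
 * works with a management process handing in pre-opened descriptors).
 * When qemu_open() cannot open the path (e.g. a vhost-user unix socket) or
 * an older libblkio rejects the fd with -EINVAL, the "fd" property is
 * cleared and the connection is retried with "path".
 */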
/* In blkio_open(bs, options, flags, errp): */
    const char *blkio_driver = bs->drv->protocol_name;
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_create(blkio_driver, &s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_create failed: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_RDWR)) {
        ret = blkio_set_bool(s->blkio, "read-only", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set read-only: %s",
                             blkio_get_error_msg());
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    if (strcmp(blkio_driver, "io_uring") == 0) {
        ret = blkio_io_uring_connect(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, "nvme-io_uring") == 0) {
        ret = blkio_nvme_io_uring_connect(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, "virtio-blk-vfio-pci") == 0) {
        ret = blkio_virtio_blk_connect(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, "virtio-blk-vhost-user") == 0) {
        ret = blkio_virtio_blk_connect(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, "virtio-blk-vhost-vdpa") == 0) {
        ret = blkio_virtio_blk_connect(bs, options, flags, errp);
    } else {
        /* ... unreachable: only the drivers above register this callback ... */
    }
    if (ret < 0) {
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio, "needs-mem-regions",
                         &s->needs_mem_regions);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get needs-mem-regions: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio, "needs-mem-region-fd",
                         &s->needs_mem_region_fd);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get needs-mem-region-fd: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_uint64(s->blkio, "mem-region-alignment",
                           &s->mem_region_alignment);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get mem-region-alignment: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio, "may-pin-mem-regions",
                         &s->may_pin_mem_regions);
    if (ret < 0) {
        /* Be conservative when the property is missing */
        s->may_pin_mem_regions = s->needs_mem_regions;
    }

    /*
     * Pinned memory regions prevent features like
     * virtio-mem from working, so register the need to disable RAM discard.
     */
    if (s->may_pin_mem_regions) {
        ret = ram_block_discard_disable(true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "ram_block_discard_disable() failed");
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    ret = blkio_start(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_start failed: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        if (s->may_pin_mem_regions) {
            ram_block_discard_disable(false);
        }
        return ret;
    }

    bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF;
    bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK;
    /* ... when libblkio supports FUA for write-zeroes: */
    bs->supported_zero_flags |= BDRV_REQ_FUA;

    qemu_mutex_init(&s->blkio_lock);
    qemu_co_mutex_init(&s->bounce_lock);
    qemu_co_queue_init(&s->bounce_available);
    QLIST_INIT(&s->bounce_bufs);
    s->blkioq = blkio_get_queue(s->blkio, 0);
    s->completion_fd = blkioq_get_completion_fd(s->blkioq);
    blkioq_set_completion_fd_enabled(s->blkioq, true);
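/*
 * Usage sketch (option names assumed from the connect functions above;
 * consult qapi/block-core.json for the authoritative schema):
 *
 *     qemu-system-x86_64 \
 *         --blockdev driver=io_uring,node-name=d0,filename=disk.img \
 *         --blockdev driver=virtio-blk-vhost-vdpa,node-name=d1,\
 *                    path=/dev/vhost-vdpa-0,cache.direct=on
 *
 * blkio_open() picks the connect function from bs->drv->protocol_name.
 */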
/* In blkio_close(bs): */
    BDRVBlkioState *s = bs->opaque;

    /* There is no destroy() API for s->bounce_lock */

    qemu_mutex_destroy(&s->blkio_lock);
    /* ... */
    blkio_destroy(&s->blkio);

    if (s->may_pin_mem_regions) {
        ram_block_discard_disable(false);
    }
/* In blkio_co_getlength(bs): */
    BDRVBlkioState *s = bs->opaque;
    uint64_t capacity;
    int ret;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_get_uint64(s->blkio, "capacity", &capacity);
    }
    if (ret < 0) {
        return -ret;
    }

    return capacity;
/* In blkio_truncate(bs, offset, exact, prealloc, flags, errp): */
    if (prealloc != PREALLOC_MODE_OFF) {
        /* ... unsupported preallocation mode ... */
        return -ENOTSUP;
    }
    /* ... */
    if (offset > current_length) {
        /* ... cannot grow the device ... */
        return -EINVAL;
    } else if (exact && offset != current_length) {
        /* ... cannot resize to an exact size ... */
        return -ENOTSUP;
    }

    return 0;
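/*
 * A shrinking, non-exact truncate therefore succeeds as a no-op; real
 * resizing is not implemented (truncate appears under "Out of scope?" in
 * the TODO list below).
 */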
/* In blkio_refresh_limits(bs, errp): */
    BDRVBlkioState *s = bs->opaque;
    QEMU_LOCK_GUARD(&s->blkio_lock);
    int value;
    int ret;

    ret = blkio_get_int(s->blkio, "request-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"request-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.request_alignment = value;
    if (bs->bl.request_alignment < 1 ||
        bs->bl.request_alignment >= INT_MAX ||
        !is_power_of_2(bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"request-alignment\" value %" PRIu32 ", "
                   "must be a power of 2 less than INT_MAX",
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "optimal-io-size", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"optimal-io-size\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.opt_transfer = value;
    if (bs->bl.opt_transfer > INT_MAX ||
        (bs->bl.opt_transfer % bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"optimal-io-size\" value %" PRIu32 ", must "
                   "be a multiple of %" PRIu32, bs->bl.opt_transfer,
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "max-transfer", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-transfer\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.max_transfer = value;
    if ((bs->bl.max_transfer % bs->bl.request_alignment) ||
        (bs->bl.opt_transfer && (bs->bl.max_transfer % bs->bl.opt_transfer))) {
        error_setg(errp, "invalid \"max-transfer\" value %" PRIu32 ", must be "
                   "a multiple of %" PRIu32 " and %" PRIu32 " (if non-zero)",
                   bs->bl.max_transfer, bs->bl.request_alignment,
                   bs->bl.opt_transfer);
        return;
    }

    ret = blkio_get_int(s->blkio, "buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"buf-alignment\" value %d, must be "
                   "positive", value);
        return;
    }
    bs->bl.min_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "optimal-buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get \"optimal-buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"optimal-buf-alignment\" value %d, "
                   "must be positive", value);
        return;
    }
    bs->bl.opt_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "max-segments", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-segments\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"max-segments\" value %d, must be positive",
                   value);
        return;
    }
    bs->bl.max_iov = value;
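/*
 * The pattern above repeats for each limit: read the libblkio property
 * with blkio_get_int(), fail with a descriptive error if the read fails,
 * and validate the value before copying it into bs->bl, so inconsistent
 * driver-reported limits never reach the block layer.
 */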
/*
 * Missing libblkio APIs:
 * - block_status
 * - co_invalidate_cache
 *
 * Out of scope?
 * - create
 * - truncate
 */
static BlockDriver bdrv_nvme_io_uring = {
    .format_name        = "nvme-io_uring",
    .protocol_name      = "nvme-io_uring",
    /* ... */
};

static BlockDriver bdrv_virtio_blk_vfio_pci = {
    .format_name        = "virtio-blk-vfio-pci",
    .protocol_name      = "virtio-blk-vfio-pci",
    /* ... */
};

static BlockDriver bdrv_virtio_blk_vhost_user = {
    .format_name        = "virtio-blk-vhost-user",
    .protocol_name      = "virtio-blk-vhost-user",
    /* ... */
};

static BlockDriver bdrv_virtio_blk_vhost_vdpa = {
    .format_name        = "virtio-blk-vhost-vdpa",
    .protocol_name      = "virtio-blk-vhost-vdpa",
    /* ... */
};
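/*
 * In the full source these BlockDrivers share one callback table
 * (.bdrv_open = blkio_open, .bdrv_co_preadv = blkio_co_preadv, and so on)
 * via a common macro; only the driver names above and small per-driver
 * details differ. The drivers are registered with bdrv_register() from a
 * block_init() constructor.
 */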