// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023-2024 DataDirect Networks.
 */

    "Enable userspace communication through io-uring");

/* in uring_cmd_set_ring_ent(): */
    pdu->ent = ring_ent;

/* in uring_cmd_to_ring_ent(): */
    return pdu->ent;

/* in fuse_uring_flush_bg(): */
    struct fuse_ring *ring = queue->ring;
    struct fuse_conn *fc = ring->fc;

    lockdep_assert_held(&queue->lock);
    lockdep_assert_held(&fc->bg_lock);

     * eliminates the need for remote queue wake-ups when global

    while ((fc->active_background < fc->max_background ||
            !queue->active_background) &&
           (!list_empty(&queue->fuse_req_bg_queue))) {
        req = list_first_entry(&queue->fuse_req_bg_queue,
        fc->active_background++;
        queue->active_background++;

        list_move_tail(&req->list, &queue->fuse_req_queue);
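/*
 * Illustrative, userspace-only model of the background-credit logic above
 * (a sketch, not the kernel code): requests move from a per-queue background
 * list to the dispatch list while the connection-wide active_background
 * count is below max_background, or while this queue has no active
 * background request at all, so every queue keeps making progress without
 * remote wake-ups.  All names below are hypothetical.
 */
#include <stdio.h>

struct bg_model {
    unsigned int max_background;     /* connection-wide limit */
    unsigned int active_background;  /* connection-wide in-flight count */
    unsigned int q_active;           /* this queue's in-flight count */
    unsigned int q_pending;          /* requests waiting on this queue */
    unsigned int q_dispatch;         /* requests moved to the dispatch list */
};

static void flush_bg(struct bg_model *m)
{
    while ((m->active_background < m->max_background || !m->q_active) &&
           m->q_pending) {
        m->q_pending--;
        m->active_background++;
        m->q_active++;
        m->q_dispatch++;
    }
}

int main(void)
{
    struct bg_model m = { .max_background = 2, .q_pending = 5 };

    flush_bg(&m);
    /* 2 dispatched: global limit reached, but the queue is not starved */
    printf("dispatched=%u pending=%u active=%u\n",
           m.q_dispatch, m.q_pending, m.active_background);
    return 0;
}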
/* in fuse_uring_req_end(): */
    struct fuse_ring_queue *queue = ent->queue;
    struct fuse_ring *ring = queue->ring;
    struct fuse_conn *fc = ring->fc;

    lockdep_assert_not_held(&queue->lock);
    spin_lock(&queue->lock);
    ent->fuse_req = NULL;
    if (test_bit(FR_BACKGROUND, &req->flags)) {
        queue->active_background--;
        spin_lock(&fc->bg_lock);

        spin_unlock(&fc->bg_lock);

    spin_unlock(&queue->lock);

    req->out.h.error = error;

    clear_bit(FR_SENT, &req->flags);

/* in fuse_uring_abort_end_queue_requests(): */
    spin_lock(&queue->lock);
    list_for_each_entry(req, &queue->fuse_req_queue, list)
        clear_bit(FR_PENDING, &req->flags);
    list_splice_init(&queue->fuse_req_queue, &req_list);
    spin_unlock(&queue->lock);

    /* must not hold queue lock to avoid order issues with fi->lock */
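/*
 * Sketch of the pattern used above: detach everything from the shared
 * queue while holding its lock, then end the requests with no lock held,
 * so that locks taken by the completion path (fi->lock in FUSE) can never
 * nest inside the queue lock.  Userspace model with pthreads; all names
 * are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
    struct node *next;
    int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *queue_head; /* shared, protected by queue_lock */

static void end_request(struct node *n)
{
    /* may take other locks; must run without queue_lock held */
    printf("ended request %d\n", n->id);
}

static void abort_all(void)
{
    struct node *local;

    pthread_mutex_lock(&queue_lock);
    local = queue_head;         /* splice the whole list out */
    queue_head = NULL;
    pthread_mutex_unlock(&queue_lock);

    while (local) {
        struct node *next = local->next;

        end_request(local);
        local = next;
    }
}

int main(void)
{
    struct node b = { NULL, 2 }, a = { &b, 1 };

    queue_head = &a;
    abort_all();
    return 0;
}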
/* in fuse_uring_abort_end_requests(): */
    struct fuse_conn *fc = ring->fc;

    for (qid = 0; qid < ring->nr_queues; qid++) {
        queue = READ_ONCE(ring->queues[qid]);

        queue->stopped = true;

        WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
        spin_lock(&queue->lock);
        spin_lock(&fc->bg_lock);

        spin_unlock(&fc->bg_lock);
        spin_unlock(&queue->lock);

/* in fuse_uring_request_expired(): */
    struct fuse_ring *ring = fc->ring;

    for (qid = 0; qid < ring->nr_queues; qid++) {
        queue = READ_ONCE(ring->queues[qid]);

        spin_lock(&queue->lock);
        if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
            fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
            fuse_fpq_processing_expired(fc, queue->fpq.processing)) {
            spin_unlock(&queue->lock);

        spin_unlock(&queue->lock);

/* in fuse_uring_destruct(): */
    struct fuse_ring *ring = fc->ring;

    for (qid = 0; qid < ring->nr_queues; qid++) {
        struct fuse_ring_queue *queue = ring->queues[qid];

        WARN_ON(!list_empty(&queue->ent_avail_queue));
        WARN_ON(!list_empty(&queue->ent_w_req_queue));
        WARN_ON(!list_empty(&queue->ent_commit_queue));
        WARN_ON(!list_empty(&queue->ent_in_userspace));

        list_for_each_entry_safe(ent, next, &queue->ent_released,
            list_del_init(&ent->list);

        kfree(queue->fpq.processing);

        ring->queues[qid] = NULL;

    kfree(ring->queues);

    fc->ring = NULL;

/*
 * Basic ring setup for this connection based on the provided configuration
 */
/* in fuse_uring_create(): */
    ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT);

    ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *),

    if (!ring->queues)

    max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write);
    max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);

    spin_lock(&fc->lock);
    if (fc->ring) {

        spin_unlock(&fc->lock);
        res = fc->ring;

    init_waitqueue_head(&ring->stop_waitq);

    ring->nr_queues = nr_queues;
    ring->fc = fc;
    ring->max_payload_sz = max_payload_size;
    smp_store_release(&fc->ring, ring);

    spin_unlock(&fc->lock);

    kfree(ring->queues);
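/*
 * Sketch of the create-or-reuse pattern above (userspace model, not the
 * kernel code): allocate the ring outside the lock, then publish it under
 * fc->lock only if no other thread won the race; the loser frees its copy
 * and uses the existing one.  Names are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

struct ring {
    unsigned int nr_queues;
};

static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ring *conn_ring;  /* published pointer, NULL until created */

static struct ring *ring_create(unsigned int nr_queues)
{
    struct ring *ring = calloc(1, sizeof(*ring));

    if (!ring)
        return NULL;
    ring->nr_queues = nr_queues;

    pthread_mutex_lock(&conn_lock);
    if (conn_ring) {
        /* lost the race: drop our allocation, reuse the winner's */
        pthread_mutex_unlock(&conn_lock);
        free(ring);
        return conn_ring;
    }
    conn_ring = ring;   /* the kernel fragment uses smp_store_release() here */
    pthread_mutex_unlock(&conn_lock);

    return ring;
}

int main(void)
{
    return ring_create(4) ? 0 : 1;
}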
/* in fuse_uring_create_queue(): */
    struct fuse_conn *fc = ring->fc;

    queue->qid = qid;
    queue->ring = ring;
    spin_lock_init(&queue->lock);

    INIT_LIST_HEAD(&queue->ent_avail_queue);
    INIT_LIST_HEAD(&queue->ent_commit_queue);
    INIT_LIST_HEAD(&queue->ent_w_req_queue);
    INIT_LIST_HEAD(&queue->ent_in_userspace);
    INIT_LIST_HEAD(&queue->fuse_req_queue);
    INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
    INIT_LIST_HEAD(&queue->ent_released);

    queue->fpq.processing = pq;
    fuse_pqueue_init(&queue->fpq);

    spin_lock(&fc->lock);
    if (ring->queues[qid]) {
        spin_unlock(&fc->lock);
        kfree(queue->fpq.processing);

        return ring->queues[qid];

    WRITE_ONCE(ring->queues[qid], queue);
    spin_unlock(&fc->lock);
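/*
 * Userspace sketch of the per-entry state machine the queue lists above
 * implement: an entry always sits on exactly one list, and moving it
 * between lists and updating its state happens together under the queue
 * lock.  The states are modeled after the FRRS_* names seen in these
 * fragments, but the code itself is illustrative only.
 */
#include <assert.h>

enum ent_state {
    ENT_AVAILABLE,  /* on ent_avail_queue, ready for a request */
    ENT_FUSE_REQ,   /* on ent_w_req_queue, has a request attached */
    ENT_USERSPACE,  /* on ent_in_userspace, owned by the daemon */
    ENT_COMMIT,     /* on ent_commit_queue, result being committed */
};

struct ring_ent {
    enum ent_state state;
    int list;       /* index of the list the entry currently sits on */
};

/* move to a new list and state in one step, like list_move() + assignment */
static void ent_move(struct ring_ent *ent, int list, enum ent_state state)
{
    ent->list = list;
    ent->state = state;
}

int main(void)
{
    struct ring_ent ent = { ENT_AVAILABLE, 0 };

    ent_move(&ent, 1, ENT_FUSE_REQ);    /* request assigned */
    ent_move(&ent, 2, ENT_USERSPACE);   /* sent to the daemon */
    ent_move(&ent, 3, ENT_COMMIT);      /* daemon commits the result */
    assert(ent.state == ENT_COMMIT);
    return 0;
}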
/* in fuse_uring_stop_fuse_req_end(): */
    clear_bit(FR_SENT, &req->flags);
    req->out.h.error = -ECONNABORTED;

/* in fuse_uring_entry_teardown(): */
    struct fuse_ring_queue *queue = ent->queue;

    spin_lock(&queue->lock);
    cmd = ent->cmd;
    ent->cmd = NULL;
    req = ent->fuse_req;
    ent->fuse_req = NULL;

    /* remove entry from queue->fpq->processing */
    list_del_init(&req->list);

     * pointer access of entries through IO_URING_F_CANCEL - there is a risk

    list_move(&ent->list, &queue->ent_released);
    ent->state = FRRS_RELEASED;
    spin_unlock(&queue->lock);

    io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED);

/* in fuse_uring_stop_list_entries(): */
    struct fuse_ring *ring = queue->ring;

    spin_lock(&queue->lock);

    if (ent->state != exp_state) {
            queue->qid, ent->state, exp_state);

    ent->state = FRRS_TEARDOWN;
    list_move(&ent->list, &to_teardown);

    spin_unlock(&queue->lock);

    queue_refs = atomic_dec_return(&ring->queue_refs);

/* in fuse_uring_teardown_entries(): */
    fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue,
    fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue,

/* in fuse_uring_log_ent_state(): */
    for (qid = 0; qid < ring->nr_queues; qid++) {
        struct fuse_ring_queue *queue = ring->queues[qid];

        spin_lock(&queue->lock);

        list_for_each_entry(ent, &queue->ent_w_req_queue, list) {
            pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n",
                ring, qid, ent, ent->state);
        list_for_each_entry(ent, &queue->ent_commit_queue, list) {
            pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n",
                ring, qid, ent, ent->state);
        spin_unlock(&queue->lock);

    ring->stop_debug_log = 1;

/* in fuse_uring_async_stop_queues(): */
    for (qid = 0; qid < ring->nr_queues; qid++) {
        struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);

     * or on the way to userspace - we could handle that with conditions in

    if (atomic_read(&ring->queue_refs) > 0) {
                ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT))

        schedule_delayed_work(&ring->async_teardown_work,

        wake_up_all(&ring->stop_waitq);
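/*
 * Userspace sketch of the asynchronous teardown above: a periodic work
 * item re-arms itself while ring entries are still referenced, warns once
 * a timeout has passed, and wakes the waiter only when the last reference
 * is gone.  Plain C stand-in for delayed work and jiffies; illustrative
 * names only.
 */
#include <stdio.h>

#define TEARDOWN_TIMEOUT 10 /* "jiffies" until we start warning */

static int queue_refs = 3;  /* references still held by entries */

/* one invocation of the delayed work; returns 1 if it must be re-armed */
static int async_teardown_work(unsigned long now, unsigned long start)
{
    if (queue_refs > 0) {
        if (now - start > TEARDOWN_TIMEOUT)
            printf("tick %lu: still %d refs, logging state\n",
                   now, queue_refs);
        return 1;       /* schedule_delayed_work() again */
    }
    printf("tick %lu: teardown complete, waking waiter\n", now);
    return 0;           /* wake_up_all(stop_waitq) */
}

int main(void)
{
    unsigned long tick = 0;

    while (async_teardown_work(tick, 0)) {
        tick++;
        if (tick % 5 == 0)
            queue_refs--;   /* entries drop their references over time */
    }
    return 0;
}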
/* in fuse_uring_stop_queues(): */
    for (qid = 0; qid < ring->nr_queues; qid++) {
        struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);

    if (atomic_read(&ring->queue_refs) > 0) {
        ring->teardown_time = jiffies;
        INIT_DELAYED_WORK(&ring->async_teardown_work,
        schedule_delayed_work(&ring->async_teardown_work,

        wake_up_all(&ring->stop_waitq);

/* in fuse_uring_cancel(): */
     * direct access on ent - it must not be destructed as long as

    queue = ent->queue;
    spin_lock(&queue->lock);
    if (ent->state == FRRS_AVAILABLE) {
        ent->state = FRRS_USERSPACE;
        list_move(&ent->list, &queue->ent_in_userspace);

        ent->cmd = NULL;

    spin_unlock(&queue->lock);

    io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags);

/* in fuse_uring_out_header_has_err(): */
    err = -EINVAL;
    if (oh->unique == 0) {
        /* Not supported through io-uring yet */
        pr_warn_once("notify through fuse-io-uring not supported\n");

    if (oh->error <= -ERESTARTSYS || oh->error > 0)

    if (oh->error) {
        err = oh->error;

    err = -ENOENT;
    if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
            req->in.h.unique,
            oh->unique & ~FUSE_INT_REQ_BIT);

    /*
     * XXX: Not supported through fuse-io-uring yet, it should not even
     * find the request - should not happen.
     */
    WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT);
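/*
 * Standalone model of the reply-header validation above (a sketch, not
 * the kernel function): reject notify replies (unique == 0), error codes
 * outside the allowed range, and replies whose unique id does not match
 * the request being committed.  Struct and constant names below are
 * simplified stand-ins.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ERESTARTSYS 512     /* kernel-internal value, defined here for the model */
#define INT_REQ_BIT 1ULL    /* low bit marks interrupt requests */

struct out_header {
    int64_t error;      /* 0 or a negative errno from the daemon */
    uint64_t unique;    /* must match the request's unique id */
};

static int out_header_has_err(const struct out_header *oh, uint64_t req_unique)
{
    if (oh->unique == 0)
        return -EINVAL;         /* notify: not supported here */
    if (oh->error <= -ERESTARTSYS || oh->error > 0)
        return -EINVAL;         /* out-of-range error code */
    if (oh->error)
        return (int)oh->error;  /* daemon-provided error */
    if ((oh->unique & ~INT_REQ_BIT) != req_unique)
        return -ENOENT;         /* reply for a different request */
    return 0;
}

int main(void)
{
    struct out_header ok = { 0, 42 }, wrong = { 0, 44 };

    printf("%d %d\n", out_header_has_err(&ok, 42),
           out_header_has_err(&wrong, 42));  /* prints 0 and -ENOENT */
    return 0;
}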
/* in fuse_uring_copy_from_ring(): */
    struct fuse_args *args = req->args;

    err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out,

        return -EFAULT;

    err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz,

/* in fuse_uring_args_to_ring(): */
    struct fuse_args *args = req->args;
    struct fuse_in_arg *in_args = args->in_args;
    int num_args = args->in_numargs;

        .commit_id = req->in.h.unique,

    err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);

    if (args->in_args[0].size > 0) {
        err = copy_to_user(&ent->headers->op_in, in_args->value,
                   in_args->size);

            return -EFAULT;

    num_args--;

    err = fuse_copy_args(&cs, num_args, args->in_pages,

    err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out,
    return err ? -EFAULT : 0;
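/*
 * Userspace sketch of how a request's arguments are laid out for the ring
 * entry above: the first in-arg is copied into the fixed header area, the
 * remaining in-args are packed back to back into the payload buffer, and
 * the resulting payload length plus the commit id (the request's unique
 * id) are recorded in a small in/out structure.  Field and buffer names
 * here are illustrative, not the uapi layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct in_arg {
    const void *value;
    size_t size;
};

struct ent_in_out {
    uint64_t commit_id; /* lets the daemon name this request later */
    size_t payload_sz;  /* bytes used in the payload buffer */
};

static int args_to_ring(const struct in_arg *args, int num_args,
                        uint64_t unique, char *op_in, size_t op_in_sz,
                        char *payload, size_t payload_cap,
                        struct ent_in_out *io)
{
    size_t off = 0;
    int i;

    /* first argument is the fixed, opcode-specific header */
    if (num_args && args[0].size) {
        if (args[0].size > op_in_sz)
            return -1;
        memcpy(op_in, args[0].value, args[0].size);
    }

    /* remaining arguments go into the payload, back to back */
    for (i = 1; i < num_args; i++) {
        if (off + args[i].size > payload_cap)
            return -1;
        memcpy(payload + off, args[i].value, args[i].size);
        off += args[i].size;
    }

    io->commit_id = unique;
    io->payload_sz = off;
    return 0;
}

int main(void)
{
    struct in_arg args[] = { { "HDR", 3 }, { "name", 4 }, { "\0", 1 } };
    char op_in[16], payload[32];
    struct ent_in_out io;

    if (args_to_ring(args, 3, 7, op_in, sizeof(op_in),
                     payload, sizeof(payload), &io))
        return 1;
    printf("commit_id=%llu payload=%zu bytes\n",
           (unsigned long long)io.commit_id, io.payload_sz);
    return 0;
}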
/* in fuse_uring_copy_to_ring(): */
    struct fuse_ring_queue *queue = ent->queue;
    struct fuse_ring *ring = queue->ring;

    err = -EIO;
    if (WARN_ON(ent->state != FRRS_FUSE_REQ)) {
        pr_err("qid=%d ring-req=%p invalid state %d on send\n",
               queue->qid, ent, ent->state);

    err = -EINVAL;
    if (WARN_ON(req->in.h.unique == 0))

    err = copy_to_user(&ent->headers->in_out, &req->in.h,
               sizeof(req->in.h));

        err = -EFAULT;

/* in fuse_uring_prepare_send(): */
    set_bit(FR_SENT, &req->flags);

/* in fuse_uring_send_next_to_ring(): */
    struct fuse_ring_queue *queue = ent->queue;

    spin_lock(&queue->lock);
    cmd = ent->cmd;
    ent->cmd = NULL;
    ent->state = FRRS_USERSPACE;
    list_move(&ent->list, &queue->ent_in_userspace);
    spin_unlock(&queue->lock);

/* in fuse_uring_ent_avail(): */
    WARN_ON_ONCE(!ent->cmd);
    list_move(&ent->list, &queue->ent_avail_queue);
    ent->state = FRRS_AVAILABLE;

/* in fuse_uring_add_to_pq(): */
    struct fuse_ring_queue *queue = ent->queue;
    struct fuse_pqueue *fpq = &queue->fpq;

    req->ring_entry = ent;
    hash = fuse_req_hash(req->in.h.unique);
    list_move_tail(&req->list, &fpq->processing[hash]);

/* in fuse_uring_add_req_to_ring_ent(): */
    struct fuse_ring_queue *queue = ent->queue;

    lockdep_assert_held(&queue->lock);

    if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE &&
             ent->state != FRRS_COMMIT)) {
        pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
            ent->state);

    clear_bit(FR_PENDING, &req->flags);
    ent->fuse_req = req;
    ent->state = FRRS_FUSE_REQ;
    list_move(&ent->list, &queue->ent_w_req_queue);

/* in fuse_uring_ent_assign_req(): */
    __must_hold(&queue->lock)

    struct fuse_ring_queue *queue = ent->queue;
    struct list_head *req_queue = &queue->fuse_req_queue;

    lockdep_assert_held(&queue->lock);

/* in fuse_uring_commit(): */
    struct fuse_ring *ring = ent->queue->ring;
    struct fuse_conn *fc = ring->fc;

    err = copy_from_user(&req->out.h, &ent->headers->in_out,
                 sizeof(req->out.h));

        req->out.h.error = -EFAULT;

    err = fuse_uring_out_header_has_err(&req->out.h, req, fc);

        /* req->out.h.error already set */

/* in fuse_uring_next_fuse_req(): */
    spin_lock(&queue->lock);

    spin_unlock(&queue->lock);

/* in fuse_ring_ent_set_commit(): */
    struct fuse_ring_queue *queue = ent->queue;

    lockdep_assert_held(&queue->lock);

    if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
        return -EIO;

    ent->state = FRRS_COMMIT;
    list_move(&ent->list, &queue->ent_commit_queue);

/* in fuse_uring_commit_fetch(): */
    const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);

    struct fuse_ring *ring = fc->ring;

    uint64_t commit_id = READ_ONCE(cmd_req->commit_id);
    unsigned int qid = READ_ONCE(cmd_req->qid);

    err = -ENOTCONN;

    if (qid >= ring->nr_queues)
        return -EINVAL;

    queue = ring->queues[qid];

    fpq = &queue->fpq;

    if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped))

    spin_lock(&queue->lock);
    /* Find a request based on the unique ID of the fuse request

    err = -ENOENT;

        pr_info("qid=%d commit_id %llu not found\n", queue->qid,

        spin_unlock(&queue->lock);

    list_del_init(&req->list);
    ent = req->ring_entry;
    req->ring_entry = NULL;

        queue->qid, commit_id, ent->state);
        spin_unlock(&queue->lock);
        req->out.h.error = err;
        clear_bit(FR_SENT, &req->flags);

    ent->cmd = cmd;
    spin_unlock(&queue->lock);

     * fuse requests would otherwise not get processed - committing
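/*
 * Userspace sketch of the commit-time lookup above: the daemon's commit id
 * is the request's unique id, which indexes into a small hash table of
 * processing lists; the request found there is detached and linked back to
 * its ring entry.  The hash function and table size here are illustrative,
 * not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define PQ_HASH_SIZE 8

struct pending_req {
    uint64_t unique;
    struct pending_req *next;
};

static struct pending_req *processing[PQ_HASH_SIZE];

static unsigned int req_hash(uint64_t unique)
{
    return unique % PQ_HASH_SIZE;   /* stand-in for the kernel's hash */
}

static void add_to_pq(struct pending_req *req)
{
    unsigned int h = req_hash(req->unique);

    req->next = processing[h];
    processing[h] = req;
}

/* find and unlink the request matching commit_id, or return NULL */
static struct pending_req *commit_fetch(uint64_t commit_id)
{
    struct pending_req **p = &processing[req_hash(commit_id)];

    for (; *p; p = &(*p)->next) {
        if ((*p)->unique == commit_id) {
            struct pending_req *req = *p;

            *p = req->next;
            return req;
        }
    }
    return NULL;
}

int main(void)
{
    struct pending_req a = { 42, NULL }, b = { 50, NULL };

    add_to_pq(&a);
    add_to_pq(&b);
    printf("found %d, stale %d\n", commit_fetch(42) == &a,
           commit_fetch(99) == NULL);
    return 0;
}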
/* in is_ring_ready(): */
    for (qid = 0; qid < ring->nr_queues && ready; qid++) {

        queue = ring->queues[qid];

        spin_lock(&queue->lock);
        if (list_empty(&queue->ent_avail_queue))

        spin_unlock(&queue->lock);

/* in fuse_uring_do_register(): */
    struct fuse_ring_queue *queue = ent->queue;
    struct fuse_ring *ring = queue->ring;
    struct fuse_conn *fc = ring->fc;
    struct fuse_iqueue *fiq = &fc->iq;

    spin_lock(&queue->lock);
    ent->cmd = cmd;

    spin_unlock(&queue->lock);

    if (!ring->ready) {
        bool ready = is_ring_ready(ring, queue->qid);

            WRITE_ONCE(fiq->ops, &fuse_io_uring_ops);
            WRITE_ONCE(ring->ready, true);
            wake_up_all(&fc->blocked_waitq);

 * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
/* in fuse_uring_get_iovec_from_sqe(): */
    struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));

    if (sqe->len != FUSE_URING_IOV_SEGS)
        return -EINVAL;
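/*
 * Userspace sketch of what the server side sets up for the fragment above:
 * the SQE's addr field points at a two-entry iovec array, where iov[0]
 * describes the header area and iov[1] the payload buffer the kernel will
 * fill.  Buffer sizes and the header layout below are placeholders; the
 * real layout comes from the fuse uapi headers.
 */
#include <stdlib.h>
#include <sys/uio.h>

#define HEADER_AREA_SZ  4096            /* placeholder for the ring-entry headers */
#define PAYLOAD_SZ      (1024 * 1024)   /* must cover the ring's max payload */

struct ring_ent_bufs {
    void *headers;
    void *payload;
    struct iovec iov[2];    /* what sqe->addr points to */
};

static int ring_ent_bufs_init(struct ring_ent_bufs *b)
{
    b->headers = malloc(HEADER_AREA_SZ);
    b->payload = malloc(PAYLOAD_SZ);
    if (!b->headers || !b->payload)
        return -1;

    b->iov[0].iov_base = b->headers;    /* kernel side: ent->headers */
    b->iov[0].iov_len = HEADER_AREA_SZ;
    b->iov[1].iov_base = b->payload;    /* kernel side: ent->payload */
    b->iov[1].iov_len = PAYLOAD_SZ;
    /*
     * The register SQE would then carry the address of b->iov in
     * sqe->addr and the number of segments (two) in sqe->len.
     */
    return 0;
}

int main(void)
{
    struct ring_ent_bufs b;

    return ring_ent_bufs_init(&b) ? 1 : 0;
}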
/* in fuse_uring_create_ring_ent(): */
    struct fuse_ring *ring = queue->ring;

    err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);

    err = -EINVAL;

    if (payload_size < ring->max_payload_sz) {

    err = -ENOMEM;

    INIT_LIST_HEAD(&ent->list);

    ent->queue = queue;
    ent->headers = iov[0].iov_base;
    ent->payload = iov[1].iov_base;

    atomic_inc(&ring->queue_refs);

/* in fuse_uring_register(): */
    const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
    struct fuse_ring *ring = smp_load_acquire(&fc->ring);

    unsigned int qid = READ_ONCE(cmd_req->qid);

    err = -ENOMEM;

    if (qid >= ring->nr_queues) {

        return -EINVAL;

    queue = ring->queues[qid];

/* in fuse_uring_cmd(): */
    u32 cmd_op = cmd->cmd_op;

        return -EINVAL;

    fud = fuse_get_dev(cmd->file);

        return -ENOTCONN;

    fc = fud->fc;

    /* Once a connection has io-uring enabled on it, it can't be disabled */
    if (!enable_uring && !fc->io_uring) {
        pr_info_ratelimited("fuse-io-uring is disabled\n");
        return -EOPNOTSUPP;

    if (fc->aborted)
        return -ECONNABORTED;
    if (!fc->connected)
        return -ENOTCONN;

    if (!fc->initialized)
        return -EAGAIN;

        fc->io_uring = 0;
        wake_up_all(&fc->blocked_waitq);

        return -EINVAL;

    return -EIOCBQUEUED;

/* in fuse_uring_send(): */
    struct fuse_ring_queue *queue = ent->queue;

    spin_lock(&queue->lock);
    ent->state = FRRS_USERSPACE;
    list_move(&ent->list, &queue->ent_in_userspace);
    ent->cmd = NULL;
    spin_unlock(&queue->lock);

/*
 * This prepares and sends the ring request in fuse-uring task context.
 * User buffers are not mapped yet - the application does not have permission
 * to write to them - this has to be executed in ring task context.
 */
/* in fuse_uring_send_in_task(): */
    struct fuse_ring_queue *queue = ent->queue;

    err = fuse_uring_prepare_send(ent, ent->fuse_req);

        err = -ECANCELED;

/* in fuse_uring_task_to_queue(): */
    if (WARN_ONCE(qid >= ring->nr_queues,
              ring->nr_queues))

    queue = ring->queues[qid];

/* in fuse_uring_dispatch_ent(): */
    struct io_uring_cmd *cmd = ent->cmd;

void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
    struct fuse_conn *fc = req->fm->fc;
    struct fuse_ring *ring = fc->ring;

    err = -EINVAL;

    if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
        req->in.h.unique = fuse_get_unique(fiq);

    spin_lock(&queue->lock);
    err = -ENOTCONN;
    if (unlikely(queue->stopped))

    set_bit(FR_URING, &req->flags);
    req->ring_queue = queue;
    ent = list_first_entry_or_null(&queue->ent_avail_queue,

    list_add_tail(&req->list, &queue->fuse_req_queue);
    spin_unlock(&queue->lock);

    spin_unlock(&queue->lock);

    req->out.h.error = err;
    clear_bit(FR_PENDING, &req->flags);
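/*
 * Userspace sketch of queue selection for request dispatch: assuming one
 * ring queue per CPU, the submitting task's current CPU picks the queue,
 * which keeps submission and completion local.  The exact mapping is not
 * shown in the fragments above; the CPU-indexed scheme below is an
 * assumption for illustration only.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static unsigned int task_to_queue(unsigned int nr_queues)
{
    int cpu = sched_getcpu();   /* userspace stand-in for task_cpu() */

    if (cpu < 0)
        cpu = 0;
    /* guard against more CPUs than queues, as the WARN_ONCE above does */
    return (unsigned int)cpu % nr_queues;
}

int main(void)
{
    printf("qid=%u of 4\n", task_to_queue(4));
    return 0;
}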
/* in fuse_uring_queue_bq_req(): */
    struct fuse_conn *fc = req->fm->fc;
    struct fuse_ring *ring = fc->ring;

    spin_lock(&queue->lock);
    if (unlikely(queue->stopped)) {
        spin_unlock(&queue->lock);

    set_bit(FR_URING, &req->flags);
    req->ring_queue = queue;
    list_add_tail(&req->list, &queue->fuse_req_bg_queue);

    ent = list_first_entry_or_null(&queue->ent_avail_queue,
    spin_lock(&fc->bg_lock);
    fc->num_background++;
    if (fc->num_background == fc->max_background)
        fc->blocked = 1;

    spin_unlock(&fc->bg_lock);

        req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,

        spin_unlock(&queue->lock);

    spin_unlock(&queue->lock);

/* in fuse_uring_remove_pending_req(): */
    struct fuse_ring_queue *queue = req->ring_queue;

    return fuse_remove_pending_req(req, &queue->lock);

    /* should be sent over io-uring as enhancement */

     * could be sent over io-uring, but interrupts should be rare,