Lines Matching defs:req

41 struct fuse_req *req;
43 req = list_first_entry_or_null(list, struct fuse_req, list);
44 if (!req)
46 return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
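
Note: the three matches above (lines 41-46) appear to belong to a small helper that asks whether the oldest request on a list has outlived the connection's request timeout. A minimal sketch of how they presumably fit together; the function name and the empty-list return value are assumptions, only the matched lines are from the listing:

/*
 * Does the oldest request on @list predate the timeout window?
 * An empty list is assumed to mean "nothing can have expired".
 */
static bool check_request_expired(struct fuse_conn *fc, struct list_head *list)
{
	struct fuse_req *req;

	req = list_first_entry_or_null(list, struct fuse_req, list);
	if (!req)
		return false;

	/* true iff create_time + req_timeout already lies in the past */
	return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
}
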
130 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
132 INIT_LIST_HEAD(&req->list);
133 INIT_LIST_HEAD(&req->intr_entry);
134 init_waitqueue_head(&req->waitq);
135 refcount_set(&req->count, 1);
136 __set_bit(FR_PENDING, &req->flags);
137 req->fm = fm;
138 req->create_time = jiffies;
143 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
144 if (req)
145 fuse_request_init(fm, req);
147 return req;
150 static void fuse_request_free(struct fuse_req *req)
152 kmem_cache_free(fuse_req_cachep, req);
155 static void __fuse_get_request(struct fuse_req *req)
157 refcount_inc(&req->count);
161 static void __fuse_put_request(struct fuse_req *req)
163 refcount_dec(&req->count);
193 static void fuse_put_request(struct fuse_req *req);
200 struct fuse_req *req;
225 req = fuse_request_alloc(fm, GFP_KERNEL);
227 if (!req) {
233 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
235 __set_bit(FR_WAITING, &req->flags);
237 __set_bit(FR_BACKGROUND, &req->flags);
246 * req->in.h.{u,g}id will be equal to FUSE_INVALID_UIDGID.
250 req->in.h.uid = from_kuid(fc->user_ns, fsuid);
251 req->in.h.gid = from_kgid(fc->user_ns, fsgid);
253 if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) ||
254 req->in.h.gid == ((gid_t)-1))) {
255 fuse_put_request(req);
259 return req;
266 static void fuse_put_request(struct fuse_req *req)
268 struct fuse_conn *fc = req->fm->fc;
270 if (refcount_dec_and_test(&req->count)) {
271 if (test_bit(FR_BACKGROUND, &req->flags)) {
282 if (test_bit(FR_WAITING, &req->flags)) {
283 __clear_bit(FR_WAITING, &req->flags);
287 fuse_request_free(req);
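
Note: read together, the init/get/put matches (lines 135, 157, 163 and 270) trace the request's reference-count lifecycle. A condensed illustration of that pattern using the refcount_t calls the matches themselves show; the pairing comments are added here:

refcount_set(&req->count, 1);			/* fuse_request_init(): born with one ref */
refcount_inc(&req->count);			/* __fuse_get_request(): take an extra ref */
refcount_dec(&req->count);			/* __fuse_put_request(): caller guarantees
						   another reference is still held */
if (refcount_dec_and_test(&req->count))		/* fuse_put_request(): last reference? */
	fuse_request_free(req);			/* -> kmem_cache_free() */
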
351 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
354 if (list_empty(&req->intr_entry)) {
355 list_add_tail(&req->intr_entry, &fiq->interrupts);
361 if (test_bit(FR_FINISHED, &req->flags)) {
362 list_del_init(&req->intr_entry);
372 static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
376 if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
377 req->in.h.unique = fuse_get_unique_locked(fiq);
378 list_add_tail(&req->list, &fiq->pending);
382 req->out.h.error = -ENOTCONN;
383 clear_bit(FR_PENDING, &req->flags);
384 fuse_request_end(req);
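
Note: the matches at lines 372-384 split into a connected and a disconnected branch. A sketch of how they presumably assemble; the fiq->connected test, the locking, and the wake-up helper name are assumptions based on the surrounding FUSE code, not part of the matched lines:

static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->lock);
	if (fiq->connected) {
		/* NOTIFY_REPLY requests carry a pre-assigned unique ID */
		if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
			req->in.h.unique = fuse_get_unique_locked(fiq);
		list_add_tail(&req->list, &fiq->pending);
		/* wake a reader and drop fiq->lock; helper name assumed */
		fuse_dev_wake_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
		clear_bit(FR_PENDING, &req->flags);
		fuse_request_end(req);
	}
}
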
395 static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
397 req->in.h.len = sizeof(struct fuse_in_header) +
398 fuse_len_args(req->args->in_numargs,
399 (struct fuse_arg *) req->args->in_args);
400 trace_fuse_request_send(req);
401 fiq->ops->send_req(fiq, req);
421 struct fuse_req *req;
423 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
424 list_del(&req->list);
426 fuse_send_one(fiq, req);
438 void fuse_request_end(struct fuse_req *req)
440 struct fuse_mount *fm = req->fm;
444 if (test_and_set_bit(FR_FINISHED, &req->flags))
447 trace_fuse_request_end(req);
453 if (test_bit(FR_INTERRUPTED, &req->flags)) {
455 list_del_init(&req->intr_entry);
458 WARN_ON(test_bit(FR_PENDING, &req->flags));
459 WARN_ON(test_bit(FR_SENT, &req->flags));
460 if (test_bit(FR_BACKGROUND, &req->flags)) {
462 clear_bit(FR_BACKGROUND, &req->flags);
483 wake_up(&req->waitq);
486 if (test_bit(FR_ASYNC, &req->flags))
487 req->args->end(fm, req->args, req->out.h.error);
489 fuse_put_request(req);
493 static int queue_interrupt(struct fuse_req *req)
495 struct fuse_iqueue *fiq = &req->fm->fc->iq;
497 /* Check that this request has actually been marked for interruption */
498 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
501 fiq->ops->send_interrupt(fiq, req);
506 bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
509 if (test_bit(FR_PENDING, &req->flags)) {
514 list_del(&req->list);
516 __fuse_put_request(req);
517 req->out.h.error = -EINTR;
524 static void request_wait_answer(struct fuse_req *req)
526 struct fuse_conn *fc = req->fm->fc;
532 err = wait_event_interruptible(req->waitq,
533 test_bit(FR_FINISHED, &req->flags));
537 set_bit(FR_INTERRUPTED, &req->flags);
540 if (test_bit(FR_SENT, &req->flags))
541 queue_interrupt(req);
544 if (!test_bit(FR_FORCE, &req->flags)) {
548 err = wait_event_killable(req->waitq,
549 test_bit(FR_FINISHED, &req->flags));
553 if (test_bit(FR_URING, &req->flags))
554 removed = fuse_uring_remove_pending_req(req);
556 removed = fuse_remove_pending_req(req, &fiq->lock);
565 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
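
Note: lines 524-565 outline a three-stage wait: an interruptible wait (any signal queues a FUSE interrupt), a killable wait (a fatal signal tries to dequeue a still-pending request), then an unconditional wait. A condensed sketch; the control flow connecting the matched lines is an assumption:

err = wait_event_interruptible(req->waitq,
			       test_bit(FR_FINISHED, &req->flags));
if (err) {
	/* Any signal: mark the request and ask the server to interrupt it */
	set_bit(FR_INTERRUPTED, &req->flags);
	if (test_bit(FR_SENT, &req->flags))
		queue_interrupt(req);
}

if (!test_bit(FR_FORCE, &req->flags)) {
	/* From here on, only fatal signals break the wait */
	err = wait_event_killable(req->waitq,
				  test_bit(FR_FINISHED, &req->flags));
	if (err) {
		/* Killed: bail out if the request never reached the daemon */
		if (test_bit(FR_URING, &req->flags))
			removed = fuse_uring_remove_pending_req(req);
		else
			removed = fuse_remove_pending_req(req, &fiq->lock);
		if (removed)
			return;
	}
}

/* FR_FORCE, or already sent: the request must run to completion */
wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
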
568 static void __fuse_request_send(struct fuse_req *req)
570 struct fuse_iqueue *fiq = &req->fm->fc->iq;
572 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
576 __fuse_get_request(req);
577 fuse_send_one(fiq, req);
579 request_wait_answer(req);
617 static void fuse_force_creds(struct fuse_req *req)
619 struct fuse_conn *fc = req->fm->fc;
621 if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) {
622 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
623 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
625 req->in.h.uid = FUSE_INVALID_UIDGID;
626 req->in.h.gid = FUSE_INVALID_UIDGID;
629 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
632 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
634 req->in.h.opcode = args->opcode;
635 req->in.h.nodeid = args->nodeid;
636 req->args = args;
638 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
640 __set_bit(FR_ASYNC, &req->flags);
648 struct fuse_req *req;
653 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
656 fuse_force_creds(req);
658 __set_bit(FR_WAITING, &req->flags);
659 __set_bit(FR_FORCE, &req->flags);
662 req = fuse_get_req(idmap, fm, false);
663 if (IS_ERR(req))
664 return PTR_ERR(req);
669 fuse_args_to_req(req, args);
672 __set_bit(FR_ISREPLY, &req->flags);
673 __fuse_request_send(req);
674 ret = req->out.h.error;
679 fuse_put_request(req);
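
Note: for context, a hypothetical caller of the synchronous path matched at lines 648-679. The GETATTR packing below is illustrative only; nodeid, idmap and fm are assumed to be in scope, and the idmap-aware fuse_simple_request() signature follows what the matches suggest:

FUSE_ARGS(args);
struct fuse_getattr_in inarg = { 0 };
struct fuse_attr_out outarg;
ssize_t err;

args.opcode = FUSE_GETATTR;
args.nodeid = nodeid;			/* hypothetical: caller's node ID */
args.in_numargs = 1;
args.in_args[0].size = sizeof(inarg);
args.in_args[0].value = &inarg;
args.out_numargs = 1;
args.out_args[0].size = sizeof(outarg);
args.out_args[0].value = &outarg;

/* Blocks in __fuse_request_send() until the daemon replies */
err = fuse_simple_request(idmap, fm, &args);
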
686 struct fuse_req *req)
690 req->in.h.unique = fuse_get_unique(fiq);
691 req->in.h.len = sizeof(struct fuse_in_header) +
692 fuse_len_args(req->args->in_numargs,
693 (struct fuse_arg *) req->args->in_args);
695 return fuse_uring_queue_bq_req(req);
702 static int fuse_request_queue_background(struct fuse_req *req)
704 struct fuse_mount *fm = req->fm;
708 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
709 if (!test_bit(FR_WAITING, &req->flags)) {
710 __set_bit(FR_WAITING, &req->flags);
713 __set_bit(FR_ISREPLY, &req->flags);
717 return fuse_request_queue_background_uring(fc, req);
725 list_add_tail(&req->list, &fc->bg_queue);
737 struct fuse_req *req;
741 req = fuse_request_alloc(fm, gfp_flags);
742 if (!req)
744 __set_bit(FR_BACKGROUND, &req->flags);
747 req = fuse_get_req(&invalid_mnt_idmap, fm, true);
748 if (IS_ERR(req))
749 return PTR_ERR(req);
752 fuse_args_to_req(req, args);
754 if (!fuse_request_queue_background(req)) {
755 fuse_put_request(req);
766 struct fuse_req *req;
769 req = fuse_get_req(&invalid_mnt_idmap, fm, false);
770 if (IS_ERR(req))
771 return PTR_ERR(req);
773 __clear_bit(FR_ISREPLY, &req->flags);
774 req->in.h.unique = unique;
776 fuse_args_to_req(req, args);
778 fuse_send_one(fiq, req);
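
Note: lines 766-778 assemble into a one-way notify reply; a sketch of the flow, with the glue between the matched lines assumed:

req = fuse_get_req(&invalid_mnt_idmap, fm, false);
if (IS_ERR(req))
	return PTR_ERR(req);

__clear_bit(FR_ISREPLY, &req->flags);	/* fire and forget: no answer awaited */
req->in.h.unique = unique;		/* ties the reply to the original notify */

fuse_args_to_req(req, args);

fuse_send_one(fiq, req);		/* queued without waiting */
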
788 static int lock_request(struct fuse_req *req)
791 if (req) {
792 spin_lock(&req->waitq.lock);
793 if (test_bit(FR_ABORTED, &req->flags))
796 set_bit(FR_LOCKED, &req->flags);
797 spin_unlock(&req->waitq.lock);
806 static int unlock_request(struct fuse_req *req)
809 if (req) {
810 spin_lock(&req->waitq.lock);
811 if (test_bit(FR_ABORTED, &req->flags))
814 clear_bit(FR_LOCKED, &req->flags);
815 spin_unlock(&req->waitq.lock);
856 err = unlock_request(cs->req);
906 return lock_request(cs->req);
967 err = unlock_request(cs->req);
1025 spin_lock(&cs->req->waitq.lock);
1026 if (test_bit(FR_ABORTED, &cs->req->flags))
1030 spin_unlock(&cs->req->waitq.lock);
1055 err = lock_request(cs->req);
1072 err = unlock_request(cs->req);
1115 if (cs->req->args->user_pages) {
1160 struct fuse_req *req = cs->req;
1161 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1230 size_t nbytes, struct fuse_req *req)
1238 list_del_init(&req->intr_entry);
1243 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1244 arg.unique = req->in.h.unique;
1389 struct fuse_req *req;
1433 req = list_entry(fiq->interrupts.next, struct fuse_req,
1435 return fuse_read_interrupt(fiq, cs, nbytes, req);
1446 req = list_entry(fiq->pending.next, struct fuse_req, list);
1447 clear_bit(FR_PENDING, &req->flags);
1448 list_del_init(&req->list);
1451 args = req->args;
1452 reqsize = req->in.h.len;
1456 req->out.h.error = -EIO;
1459 req->out.h.error = -E2BIG;
1460 fuse_request_end(req);
1469 req->out.h.error = err = -ECONNABORTED;
1473 list_add(&req->list, &fpq->io);
1475 cs->req = req;
1476 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1482 clear_bit(FR_LOCKED, &req->flags);
1488 req->out.h.error = -EIO;
1491 if (!test_bit(FR_ISREPLY, &req->flags)) {
1495 hash = fuse_req_hash(req->in.h.unique);
1496 list_move_tail(&req->list, &fpq->processing[hash]);
1497 __fuse_get_request(req);
1498 set_bit(FR_SENT, &req->flags);
1502 if (test_bit(FR_INTERRUPTED, &req->flags))
1503 queue_interrupt(req);
1504 fuse_put_request(req);
1509 if (!test_bit(FR_PRIVATE, &req->flags))
1510 list_del_init(&req->list);
1512 fuse_request_end(req);
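
Note: the read-side matches at lines 1389-1512 sketch the hand-off of a pending request to the daemon. A condensed skeleton; error paths, locking, and the copy of the argument payload are elided or assumed:

req = list_entry(fiq->pending.next, struct fuse_req, list);
clear_bit(FR_PENDING, &req->flags);
list_del_init(&req->list);

list_add(&req->list, &fpq->io);
cs->req = req;
err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
/* ... copy the req->args payload; failures set req->out.h.error = -EIO ... */
clear_bit(FR_LOCKED, &req->flags);

if (!test_bit(FR_ISREPLY, &req->flags)) {
	fuse_request_end(req);			/* one-way request: done now */
} else {
	hash = fuse_req_hash(req->in.h.unique);
	list_move_tail(&req->list, &fpq->processing[hash]);
	__fuse_get_request(req);		/* held until the reply arrives */
	set_bit(FR_SENT, &req->flags);
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(req);		/* a signal raced with the send */
	fuse_put_request(req);
}
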
1987 struct fuse_req *req, *next;
2008 list_for_each_entry_safe(req, next, &to_queue, list) {
2009 set_bit(FR_PENDING, &req->flags);
2010 clear_bit(FR_SENT, &req->flags);
2012 req->in.h.unique |= FUSE_UNIQUE_RESEND;
2018 list_for_each_entry(req, &to_queue, list)
2019 clear_bit(FR_PENDING, &req->flags);
2088 struct fuse_req *req;
2090 list_for_each_entry(req, &fpq->processing[hash], list) {
2091 if (req->in.h.unique == unique)
2092 return req;
2138 struct fuse_req *req;
2167 req = NULL;
2169 req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
2172 if (!req) {
2179 __fuse_get_request(req);
2188 err = queue_interrupt(req);
2190 fuse_put_request(req);
2195 clear_bit(FR_SENT, &req->flags);
2196 list_move(&req->list, &fpq->io);
2197 req->out.h = oh;
2198 set_bit(FR_LOCKED, &req->flags);
2200 cs->req = req;
2201 if (!req->args->page_replace)
2207 err = fuse_copy_out_args(cs, req->args, nbytes);
2211 clear_bit(FR_LOCKED, &req->flags);
2215 req->out.h.error = -EIO;
2216 if (!test_bit(FR_PRIVATE, &req->flags))
2217 list_del_init(&req->list);
2220 fuse_request_end(req);
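
Note: the write-side matches at lines 2088-2220 form the reply path: look the request up by its unique ID, copy the daemon's answer in, and end it (interrupt replies, flagged by FUSE_INT_REQ_BIT, are handled separately via queue_interrupt() at line 2188). A condensed skeleton; the error paths are assumed:

req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
if (!req)
	goto err_unlock;			/* unknown unique: discard the reply */
__fuse_get_request(req);

clear_bit(FR_SENT, &req->flags);
list_move(&req->list, &fpq->io);
req->out.h = oh;				/* daemon's reply header */
set_bit(FR_LOCKED, &req->flags);

cs->req = req;
err = fuse_copy_out_args(cs, req->args, nbytes);

clear_bit(FR_LOCKED, &req->flags);
if (err)
	req->out.h.error = -EIO;		/* malformed reply payload */
if (!test_bit(FR_PRIVATE, &req->flags))
	list_del_init(&req->list);

fuse_request_end(req);				/* wakes request_wait_answer() */
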
2364 struct fuse_req *req;
2365 req = list_entry(head->next, struct fuse_req, list);
2366 req->out.h.error = -ECONNABORTED;
2367 clear_bit(FR_SENT, &req->flags);
2368 list_del_init(&req->list);
2369 fuse_request_end(req);
2413 struct fuse_req *req, *next;
2431 list_for_each_entry_safe(req, next, &fpq->io, list) {
2432 req->out.h.error = -ECONNABORTED;
2433 spin_lock(&req->waitq.lock);
2434 set_bit(FR_ABORTED, &req->flags);
2435 if (!test_bit(FR_LOCKED, &req->flags)) {
2436 set_bit(FR_PRIVATE, &req->flags);
2437 __fuse_get_request(req);
2438 list_move(&req->list, &to_end);
2440 spin_unlock(&req->waitq.lock);
2455 list_for_each_entry(req, &fiq->pending, list)
2456 clear_bit(FR_PENDING, &req->flags);
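
Note: presumably the abort path continues by splicing the now-unmarked pending requests onto the local to_end list, to be failed with -ECONNABORTED via the end_requests pattern shown at lines 2364-2369 above; the splice below is that assumed next step, not a matched line:

list_splice_tail_init(&fiq->pending, &to_end);
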