Lines matching full:work
34 IO_WORKER_F_BOUND = 16, /* is doing bounded work */
39 IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
202 struct io_wq_work *work) in io_work_get_acct() argument
204 if (work->flags & IO_WQ_WORK_UNBOUND) in io_work_get_acct()
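The io_work_get_acct() lines above pick an accounting bucket from work->flags: each io_wqe keeps separate bounded and unbounded worker accounting, and IO_WQ_WORK_UNBOUND selects which bucket a work item is charged against. A minimal user-space sketch of that selection (struct names, fields and limits here are illustrative, not the kernel's):

#include <stdio.h>

enum { MY_WORK_UNBOUND = 1 << 0 };          /* models IO_WQ_WORK_UNBOUND */
enum { ACCT_BOUND = 0, ACCT_UNBOUND = 1 };  /* models the two io_wqe_acct slots */

struct my_acct { int nr_workers, max_workers; };
struct my_work { unsigned flags; };
struct my_wqe  { struct my_acct acct[2]; };

/* pick the accounting bucket the same way io_work_get_acct() does */
static struct my_acct *work_get_acct(struct my_wqe *wqe, struct my_work *work)
{
    return &wqe->acct[(work->flags & MY_WORK_UNBOUND) ? ACCT_UNBOUND : ACCT_BOUND];
}

int main(void)
{
    struct my_wqe wqe = { .acct = { { 0, 4 }, { 0, 128 } } };
    struct my_work w = { .flags = MY_WORK_UNBOUND };

    printf("max workers for this item: %d\n", work_get_acct(&wqe, &w)->max_workers);
    return 0;
}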
299 * Most likely an attempt to queue unbounded work on an io_wq that in io_wqe_wake_worker()
342 * Worker will start processing some work. Move it to the busy list, if
346 struct io_wq_work *work) in __io_worker_busy() argument
361 work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0; in __io_worker_busy()
380 * No work, worker going to sleep. Move to freelist, and unuse mm if we
397 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
399 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
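io_get_work_hash() recovers a hash bucket that was packed into the high bits of work->flags, above IO_WQ_HASH_SHIFT, so a single flags word carries both state bits and the bucket number; io_wq_hash_work() further down in this listing (line 938) is the encoding side. A hedged sketch of that packing, with made-up constants:

#include <assert.h>
#include <stdio.h>

#define MY_HASH_SHIFT 24          /* illustrative; not the kernel's value */
#define MY_WORK_HASHED (1u << 5)  /* models IO_WQ_WORK_HASHED */

struct my_work { unsigned int flags; };

/* pack a bucket number above MY_HASH_SHIFT, mirroring io_wq_hash_work() */
static void work_set_hash(struct my_work *work, unsigned int bucket)
{
    work->flags |= MY_WORK_HASHED | (bucket << MY_HASH_SHIFT);
}

/* recover the bucket, mirroring io_get_work_hash() */
static unsigned int work_get_hash(const struct my_work *work)
{
    return work->flags >> MY_HASH_SHIFT;
}

int main(void)
{
    struct my_work w = { .flags = 0 };

    work_set_hash(&w, 7);
    assert(work_get_hash(&w) == 7);
    printf("flags=%#x hash=%u\n", w.flags, work_get_hash(&w));
    return 0;
}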
406 struct io_wq_work *work, *tail; in io_get_next_work() local
410 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
413 if (!io_wq_is_hashed(work)) { in io_get_next_work()
415 return work; in io_get_next_work()
419 hash = io_get_work_hash(work); in io_get_next_work()
422 /* all items with this hash lie in [work, tail] */ in io_get_next_work()
426 return work; in io_get_next_work()
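io_get_next_work() scans the pending list: unhashed work can be taken immediately, while hashed work is only taken if no worker currently holds that bucket; because items with the same hash are kept contiguous (the "[work, tail]" comment above), a busy bucket can be skipped as a whole. A toy model of that scan, assuming a plain singly linked list and a bitmap of busy buckets (all names are made up):

#include <stdio.h>
#include <stdbool.h>

struct my_work {
    int id;
    bool hashed;
    unsigned bucket;
    struct my_work *next;
};

/* return the first item a worker may run, honouring per-bucket serialization */
static struct my_work *get_next_work(struct my_work *head, unsigned long *busy_map)
{
    for (struct my_work *w = head; w; w = w->next) {
        if (!w->hashed)
            return w;                       /* unhashed work can run any time */
        if (!(*busy_map & (1ul << w->bucket))) {
            *busy_map |= 1ul << w->bucket;  /* claim the bucket */
            return w;
        }
        /* bucket already running: every later same-bucket item would also be
         * skipped, which is why the kernel keeps them contiguous and can jump
         * straight past the [work, tail] span */
    }
    return NULL;
}

int main(void)
{
    struct my_work c = { 3, false, 0, NULL };
    struct my_work b = { 2, true, 5, &c };
    struct my_work a = { 1, true, 5, &b };
    unsigned long busy = 1ul << 5;          /* bucket 5 is already executing */

    struct my_work *w = get_next_work(&a, &busy);
    printf("picked work %d\n", w ? w->id : -1);   /* picks 3, skipping 1 and 2 */
    return 0;
}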
433 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work) in io_wq_switch_mm() argument
441 if (mmget_not_zero(work->identity->mm)) { in io_wq_switch_mm()
442 kthread_use_mm(work->identity->mm); in io_wq_switch_mm()
443 worker->mm = work->identity->mm; in io_wq_switch_mm()
447 /* failed grabbing mm, ensure work gets cancelled */ in io_wq_switch_mm()
448 work->flags |= IO_WQ_WORK_CANCEL; in io_wq_switch_mm()
452 struct io_wq_work *work) in io_wq_switch_blkcg() argument
455 if (!(work->flags & IO_WQ_WORK_BLKCG)) in io_wq_switch_blkcg()
457 if (work->identity->blkcg_css != worker->blkcg_css) { in io_wq_switch_blkcg()
458 kthread_associate_blkcg(work->identity->blkcg_css); in io_wq_switch_blkcg()
459 worker->blkcg_css = work->identity->blkcg_css; in io_wq_switch_blkcg()
465 struct io_wq_work *work) in io_wq_switch_creds() argument
467 const struct cred *old_creds = override_creds(work->identity->creds); in io_wq_switch_creds()
469 worker->cur_creds = work->identity->creds; in io_wq_switch_creds()
477 struct io_wq_work *work) in io_impersonate_work() argument
479 if ((work->flags & IO_WQ_WORK_FILES) && in io_impersonate_work()
480 current->files != work->identity->files) { in io_impersonate_work()
482 current->files = work->identity->files; in io_impersonate_work()
483 current->nsproxy = work->identity->nsproxy; in io_impersonate_work()
485 if (!work->identity->files) { in io_impersonate_work()
486 /* failed grabbing files, ensure work gets cancelled */ in io_impersonate_work()
487 work->flags |= IO_WQ_WORK_CANCEL; in io_impersonate_work()
490 if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs) in io_impersonate_work()
491 current->fs = work->identity->fs; in io_impersonate_work()
492 if ((work->flags & IO_WQ_WORK_MM) && work->identity->mm != worker->mm) in io_impersonate_work()
493 io_wq_switch_mm(worker, work); in io_impersonate_work()
494 if ((work->flags & IO_WQ_WORK_CREDS) && in io_impersonate_work()
495 worker->cur_creds != work->identity->creds) in io_impersonate_work()
496 io_wq_switch_creds(worker, work); in io_impersonate_work()
497 if (work->flags & IO_WQ_WORK_FSIZE) in io_impersonate_work()
498 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->identity->fsize; in io_impersonate_work()
501 io_wq_switch_blkcg(worker, work); in io_impersonate_work()
503 current->loginuid = work->identity->loginuid; in io_impersonate_work()
504 current->sessionid = work->identity->sessionid; in io_impersonate_work()
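io_impersonate_work() adopts only the parts of the submitting task's identity that the work item flagged as needed (files, fs, mm, creds, blkcg, fsize, loginuid), and if a required resource can no longer be grabbed it sets IO_WQ_WORK_CANCEL so the handler fails the request instead of running it with the wrong context. A reduced user-space model of that per-flag adoption (flag names and fields are illustrative):

#include <stdio.h>

enum {
    MY_WORK_FILES  = 1 << 0,
    MY_WORK_CREDS  = 1 << 1,
    MY_WORK_CANCEL = 1 << 7,
};

struct my_identity { void *files; int creds; };
struct my_work     { unsigned flags; struct my_identity *identity; };
struct my_worker   { void *files; int creds; };

static void impersonate_work(struct my_worker *worker, struct my_work *work)
{
    if ((work->flags & MY_WORK_FILES) && worker->files != work->identity->files) {
        if (!work->identity->files) {
            /* resource is gone: make sure the handler sees a cancel */
            work->flags |= MY_WORK_CANCEL;
        } else {
            worker->files = work->identity->files;
        }
    }
    if ((work->flags & MY_WORK_CREDS) && worker->creds != work->identity->creds)
        worker->creds = work->identity->creds;
}

int main(void)
{
    struct my_identity id = { .files = NULL, .creds = 1000 };
    struct my_work w = { .flags = MY_WORK_FILES | MY_WORK_CREDS, .identity = &id };
    struct my_worker worker = { 0 };

    impersonate_work(&worker, &w);
    printf("creds=%d cancelled=%d\n", worker.creds, !!(w.flags & MY_WORK_CANCEL));
    return 0;
}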
509 struct io_wq_work *work) in io_assign_current_work() argument
511 if (work) { in io_assign_current_work()
512 /* flush pending signals before assigning new work */ in io_assign_current_work()
524 worker->cur_work = work; in io_assign_current_work()
528 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
537 struct io_wq_work *work; in io_worker_handle_work() local
540 * If we got some work, mark us as busy. If we didn't, but in io_worker_handle_work()
541 * the list isn't empty, it means we stalled on hashed work. in io_worker_handle_work()
542 * Mark us stalled so we don't keep looking for work when we in io_worker_handle_work()
543 * can't make progress, any work completion or insertion will in io_worker_handle_work()
546 work = io_get_next_work(wqe); in io_worker_handle_work()
547 if (work) in io_worker_handle_work()
548 __io_worker_busy(wqe, worker, work); in io_worker_handle_work()
553 if (!work) in io_worker_handle_work()
555 io_assign_current_work(worker, work); in io_worker_handle_work()
560 unsigned int hash = io_get_work_hash(work); in io_worker_handle_work()
562 next_hashed = wq_next_work(work); in io_worker_handle_work()
563 io_impersonate_work(worker, work); in io_worker_handle_work()
566 * work, the worker function will do the right thing. in io_worker_handle_work()
569 work->flags |= IO_WQ_WORK_CANCEL; in io_worker_handle_work()
571 old_work = work; in io_worker_handle_work()
572 linked = wq->do_work(work); in io_worker_handle_work()
574 work = next_hashed; in io_worker_handle_work()
575 if (!work && linked && !io_wq_is_hashed(linked)) { in io_worker_handle_work()
576 work = linked; in io_worker_handle_work()
579 io_assign_current_work(worker, work); in io_worker_handle_work()
590 if (!work) in io_worker_handle_work()
594 } while (work); in io_worker_handle_work()
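The io_worker_handle_work() loop above runs one item, then prefers the next item from the same hash chain and only falls back to the linked continuation returned by the handler; a linked item that cannot be taken immediately is re-enqueued instead. A toy model of that ordering, assuming do_work() returns the continuation (names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct my_work {
    const char *name;
    struct my_work *hash_next;   /* next item in the same hash chain */
    struct my_work *linked;      /* continuation returned after execution */
};

/* models wq->do_work(): "run" the item and hand back its continuation */
static struct my_work *do_work(struct my_work *w)
{
    printf("running %s\n", w->name);
    return w->linked;
}

static void handle_work(struct my_work *work)
{
    while (work) {
        struct my_work *next_hashed = work->hash_next;
        struct my_work *linked = do_work(work);

        work = next_hashed;
        if (!work && linked)
            work = linked;       /* nothing hashed left, continue the link */
        else if (linked)
            printf("re-enqueue %s\n", linked->name);
    }
}

int main(void)
{
    struct my_work done = { "fsync-after-write", NULL, NULL };
    struct my_work w2 = { "write-2", NULL, NULL };
    struct my_work w1 = { "write-1", &w2, &done };

    handle_work(&w1);   /* write-1, re-enqueue fsync-after-write, write-2 */
    return 0;
}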
663 * running and we have work pending, wake up a free one or have the manager
729 /* if we have available workers or no work, no need */ in io_wqe_need_worker()
839 struct io_wq_work *work) in io_wq_can_queue() argument
843 if (!(work->flags & IO_WQ_WORK_UNBOUND)) in io_wq_can_queue()
861 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) in io_run_cancel() argument
866 struct io_wq_work *old_work = work; in io_run_cancel()
868 work->flags |= IO_WQ_WORK_CANCEL; in io_run_cancel()
869 work = wq->do_work(work); in io_run_cancel()
871 } while (work); in io_run_cancel()
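io_run_cancel() does not just drop work it cannot queue: it sets IO_WQ_WORK_CANCEL and still passes the item to the handler so it can complete with an error, then repeats for any linked continuation the handler returns. A small sketch of that pattern (error text and names are illustrative):

#include <stdio.h>

enum { MY_WORK_CANCEL = 1 << 0 };

struct my_work {
    const char *name;
    unsigned flags;
    struct my_work *linked;
};

/* models wq->do_work(): a cancelled item completes with an error instead of running */
static struct my_work *do_work(struct my_work *w)
{
    if (w->flags & MY_WORK_CANCEL)
        printf("%s completed with -ECANCELED\n", w->name);
    return w->linked;
}

static void run_cancel(struct my_work *work)
{
    do {
        work->flags |= MY_WORK_CANCEL;
        work = do_work(work);
    } while (work);
}

int main(void)
{
    struct my_work second = { "linked-read", 0, NULL };
    struct my_work first = { "poll-add", 0, &second };

    run_cancel(&first);   /* both items complete with the cancel error */
    return 0;
}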
874 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_insert_work() argument
879 if (!io_wq_is_hashed(work)) { in io_wqe_insert_work()
881 wq_list_add_tail(&work->list, &wqe->work_list); in io_wqe_insert_work()
885 hash = io_get_work_hash(work); in io_wqe_insert_work()
887 wqe->hash_tail[hash] = work; in io_wqe_insert_work()
891 wq_list_add_after(&work->list, &tail->list, &wqe->work_list); in io_wqe_insert_work()
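io_wqe_insert_work() appends unhashed work to the tail of the pending list, but splices hashed work in right after hash_tail[hash], the previous last item of its bucket, which is what keeps a bucket's items contiguous for io_get_next_work(). A user-space sketch of that insertion (bucket count and names are made up):

#include <stdio.h>
#include <stddef.h>

#define NR_BUCKETS 4

struct my_work {
    const char *name;
    int bucket;                   /* -1 means unhashed */
    struct my_work *next;
};

struct my_wqe {
    struct my_work *head, *tail;
    struct my_work *hash_tail[NR_BUCKETS];
};

static void list_add_tail(struct my_wqe *wqe, struct my_work *w)
{
    if (wqe->tail)
        wqe->tail->next = w;
    else
        wqe->head = w;
    wqe->tail = w;
}

static void insert_work(struct my_wqe *wqe, struct my_work *w)
{
    if (w->bucket < 0) {
        list_add_tail(wqe, w);
        return;
    }

    struct my_work *tail = wqe->hash_tail[w->bucket];
    wqe->hash_tail[w->bucket] = w;
    if (!tail) {
        list_add_tail(wqe, w);    /* first item for this bucket */
        return;
    }

    /* splice right after the bucket's previous tail */
    w->next = tail->next;
    tail->next = w;
    if (wqe->tail == tail)
        wqe->tail = w;
}

int main(void)
{
    struct my_wqe wqe = { 0 };
    struct my_work a = { "write-A1", 1, NULL };
    struct my_work b = { "read-B",  -1, NULL };
    struct my_work c = { "write-A2", 1, NULL };

    insert_work(&wqe, &a);
    insert_work(&wqe, &b);
    insert_work(&wqe, &c);        /* lands next to write-A1, ahead of read-B */

    for (struct my_work *w = wqe.head; w; w = w->next)
        printf("%s\n", w->name);  /* write-A1, write-A2, read-B */
    return 0;
}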
894 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work) in io_wqe_enqueue() argument
896 struct io_wqe_acct *acct = io_work_get_acct(wqe, work); in io_wqe_enqueue()
906 if (unlikely(!io_wq_can_queue(wqe, acct, work))) { in io_wqe_enqueue()
907 io_run_cancel(work, wqe); in io_wqe_enqueue()
911 work_flags = work->flags; in io_wqe_enqueue()
913 io_wqe_insert_work(wqe, work); in io_wqe_enqueue()
922 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) in io_wq_enqueue() argument
926 io_wqe_enqueue(wqe, work); in io_wq_enqueue()
930 * Work items that hash to the same value will not be done in parallel.
933 void io_wq_hash_work(struct io_wq_work *work, void *val) in io_wq_hash_work() argument
938 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); in io_wq_hash_work()
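io_wq_hash_work() is how callers opt into that serialization: a key pointer is reduced to a small bucket index and stored in the high flag bits, so items sharing a key (io_uring uses the file's inode for buffered writes) run one at a time while other keys proceed in parallel. A toy reduction of a pointer to a bucket, assuming an illustrative mixing constant rather than the kernel's hash_ptr():

#include <stdio.h>
#include <stdint.h>

#define BUCKET_BITS 5   /* illustrative bucket count of 2^5 */

static unsigned int hash_key(const void *key)
{
    uintptr_t v = (uintptr_t)key;

    v *= 0x9E3779B97F4A7C15ull;              /* Fibonacci-style mixing */
    return (unsigned int)(v >> (sizeof(v) * 8 - BUCKET_BITS));
}

int main(void)
{
    int inode_a, inode_b;

    /* the same key always maps to the same bucket; different keys usually differ */
    printf("bucket(a)=%u bucket(a)=%u bucket(b)=%u\n",
           hash_key(&inode_a), hash_key(&inode_a), hash_key(&inode_b));
    return 0;
}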
971 * may dereference the passed in work. in io_wq_worker_cancel()
986 struct io_wq_work *work, in io_wqe_remove_pending() argument
989 unsigned int hash = io_get_work_hash(work); in io_wqe_remove_pending()
992 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { in io_wqe_remove_pending()
1000 wq_list_del(&wqe->work_list, &work->list, prev); in io_wqe_remove_pending()
1007 struct io_wq_work *work; in io_wqe_cancel_pending_work() local
1013 work = container_of(node, struct io_wq_work, list); in io_wqe_cancel_pending_work()
1014 if (!match->fn(work, match->data)) in io_wqe_cancel_pending_work()
1016 io_wqe_remove_pending(wqe, work, prev); in io_wqe_cancel_pending_work()
1018 io_run_cancel(work, wqe); in io_wqe_cancel_pending_work()
1049 * from there. CANCEL_OK means that the work is returned as-new, in io_wq_cancel_cb()
1061 * Now check if a free (going busy) or busy worker has the work in io_wq_cancel_cb()
1081 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data) in io_wq_io_cb_cancel_data() argument
1083 return work == data; in io_wq_io_cb_cancel_data()
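Pending-work cancellation is driven by a matcher callback: io_wqe_cancel_pending_work() walks the list, unlinks every item the callback accepts, and runs it through the cancel path; io_wq_io_cb_cancel_data() above is the simplest matcher, plain pointer equality. A sketch of that pattern with a toy list (names are illustrative):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct my_work { const char *name; struct my_work *next; };

typedef bool (*match_fn)(struct my_work *work, void *data);

/* simplest matcher: cancel exactly one known item, like io_wq_io_cb_cancel_data() */
static bool match_ptr(struct my_work *work, void *data)
{
    return work == data;
}

static void run_cancel(struct my_work *work)
{
    printf("cancelled %s\n", work->name);
}

static void cancel_pending(struct my_work **head, match_fn fn, void *data)
{
    struct my_work **pp = head;

    while (*pp) {
        struct my_work *w = *pp;

        if (fn(w, data)) {
            *pp = w->next;       /* unlink, then cancel outside the list */
            run_cancel(w);
            continue;
        }
        pp = &w->next;
    }
}

int main(void)
{
    struct my_work b = { "timeout", NULL };
    struct my_work a = { "poll-add", &b };
    struct my_work *head = &a;

    cancel_pending(&head, match_ptr, &b);    /* only "timeout" is cancelled */
    printf("head is now %s\n", head->name);
    return 0;
}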