Lines Matching full:work

31 	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
65 struct work_struct work; member
161 struct io_wq_work *work) in io_work_get_acct() argument
163 return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
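The io_work_get_acct() hits above show that a single flag bit, IO_WQ_WORK_UNBOUND, decides whether an item is charged to the bounded or the unbounded accounting bucket. A minimal userspace sketch of that routing follows; the struct layout, the flag value and the sk_/ACCT_ names are assumptions made for the sketch, not the kernel definitions.

#include <stdio.h>

#define IO_WQ_WORK_UNBOUND	4u	/* flag value assumed for this sketch */

enum { ACCT_BOUND, ACCT_UNBOUND, ACCT_NR };

struct wq_sketch {
	int nr_pending[ACCT_NR];	/* stand-in for two struct io_wq_acct */
};

/* mirrors the io_work_get_acct() hit: bounded unless UNBOUND is set */
static int *work_get_acct(struct wq_sketch *wq, unsigned int work_flags)
{
	return &wq->nr_pending[(work_flags & IO_WQ_WORK_UNBOUND) ?
			       ACCT_UNBOUND : ACCT_BOUND];
}

int main(void)
{
	struct wq_sketch wq = { .nr_pending = { 0, 0 } };

	(*work_get_acct(&wq, 0))++;			/* bounded item */
	(*work_get_acct(&wq, IO_WQ_WORK_UNBOUND))++;	/* unbounded item */
	printf("bounded=%d unbounded=%d\n",
	       wq.nr_pending[ACCT_BOUND], wq.nr_pending[ACCT_UNBOUND]);
	return 0;
}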
252 * If there's work to do, returns true with acct->lock acquired. If not,
291 * starting work or finishing work. In either case, if it does in io_wq_activate_free_worker()
292 * go to sleep, we'll kick off a new task for this work anyway. in io_wq_activate_free_worker()
309 * Most likely an attempt to queue unbounded work on an io_wq that in io_wq_create_worker()
391 * work item after we canceled in io_wq_exit_workers(). in io_queue_worker_create()
428 * Worker will start processing some work. Move it to the busy list, if
442 * No work, worker going to sleep. Move to freelist.
453 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
455 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
480 struct io_wq_work *work, *tail; in io_get_next_work() local
487 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
490 if (!io_wq_is_hashed(work)) { in io_get_next_work()
492 return work; in io_get_next_work()
495 hash = io_get_work_hash(work); in io_get_next_work()
496 /* all items with this hash lie in [work, tail] */ in io_get_next_work()
503 return work; in io_get_next_work()
516 * work being added and clearing the stalled bit. in io_get_next_work()
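Taken together, the io_get_next_work() hits describe the selection rule: an unhashed item may be dequeued immediately, a hashed item may run only if no other item with the same hash key is in flight, and if every candidate is blocked the acct is marked stalled until some work completes or new work arrives. Below is a simplified, userspace-only sketch of that rule, assuming a plain singly linked list and a boolean busy map; the kernel additionally cuts the whole [work, tail] span out of the list in one step via hash_tail, which the sketch omits.

#include <stdbool.h>
#include <stdio.h>

#define NO_HASH	(-1u)

struct witem {
	const char	*name;
	unsigned int	hash;		/* NO_HASH means "not hashed" */
	struct witem	*next;
};

static bool hash_busy[256];		/* stand-in for wq->hash->map */

static struct witem *get_next_work(struct witem **head)
{
	struct witem **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		struct witem *w = *pp;

		/* not hashed, can run anytime */
		if (w->hash == NO_HASH) {
			*pp = w->next;
			return w;
		}
		/* hashed, can run only if the bucket is idle */
		if (!hash_busy[w->hash]) {
			hash_busy[w->hash] = true;
			*pp = w->next;
			return w;
		}
		/* bucket busy: leave the item queued and keep scanning */
	}
	return NULL;	/* nothing runnable: caller would mark the acct stalled */
}

int main(void)
{
	struct witem c = { "c (unhashed)", NO_HASH, NULL };
	struct witem b = { "b (hash 7)",   7,       &c };
	struct witem a = { "a (hash 7)",   7,       &b };
	struct witem *head = &a;
	struct witem *w;

	hash_busy[7] = true;		/* pretend hash 7 is already running */
	w = get_next_work(&head);
	printf("picked: %s\n", w ? w->name : "(stalled)");	/* picks c */
	return 0;
}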
533 struct io_wq_work *work) in io_assign_current_work() argument
535 if (work) { in io_assign_current_work()
541 worker->cur_work = work; in io_assign_current_work()
557 struct io_wq_work *work; in io_worker_handle_work() local
560 * If we got some work, mark us as busy. If we didn't, but in io_worker_handle_work()
561 * the list isn't empty, it means we stalled on hashed work. in io_worker_handle_work()
562 * Mark us stalled so we don't keep looking for work when we in io_worker_handle_work()
563 * can't make progress, any work completion or insertion will in io_worker_handle_work()
566 work = io_get_next_work(acct, worker); in io_worker_handle_work()
568 if (work) { in io_worker_handle_work()
573 * it becomes the active work. That avoids a window in io_worker_handle_work()
574 * where the work has been removed from our general in io_worker_handle_work()
575 * work list, but isn't yet discoverable as the in io_worker_handle_work()
576 * current work item for this worker. in io_worker_handle_work()
579 worker->next_work = work; in io_worker_handle_work()
584 io_assign_current_work(worker, work); in io_worker_handle_work()
590 unsigned int hash = io_get_work_hash(work); in io_worker_handle_work()
592 next_hashed = wq_next_work(work); in io_worker_handle_work()
594 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND)) in io_worker_handle_work()
595 work->flags |= IO_WQ_WORK_CANCEL; in io_worker_handle_work()
596 wq->do_work(work); in io_worker_handle_work()
599 linked = wq->free_work(work); in io_worker_handle_work()
600 work = next_hashed; in io_worker_handle_work()
601 if (!work && linked && !io_wq_is_hashed(linked)) { in io_worker_handle_work()
602 work = linked; in io_worker_handle_work()
605 io_assign_current_work(worker, work); in io_worker_handle_work()
618 } while (work); in io_worker_handle_work()
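The io_worker_handle_work() hits outline the inner loop: run the current item, let free_work() hand back a possible linked follow-up, then continue with the next item of the hashed chain, or with the linked item if it is unhashed, and re-enqueue anything that cannot be taken inline. A compressed sketch of that loop shape, with do_work/free_work reduced to stand-in functions and all sk_ names invented for the sketch:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct sk_work {
	const char	*name;
	bool		hashed;
	struct sk_work	*chain;		/* stand-in for wq_next_work() */
	struct sk_work	*linked;	/* follow-up handed back by free_work() */
};

static void sk_do_work(struct sk_work *w)
{
	printf("running %s\n", w->name);
}

/* the kernel's free_work() releases the request and may hand back a
 * linked follow-up; here it just returns the stored pointer */
static struct sk_work *sk_free_work(struct sk_work *w)
{
	return w->linked;
}

static void sk_enqueue(struct sk_work *w)
{
	printf("re-enqueued %s for another worker\n", w->name);
}

/* simplified shape of the do { } while (work) loop shown in the
 * io_worker_handle_work() hits above */
static void handle_work(struct sk_work *work)
{
	do {
		struct sk_work *next_hashed = work->chain;
		struct sk_work *linked;

		sk_do_work(work);
		linked = sk_free_work(work);

		work = next_hashed;
		if (!work && linked && !linked->hashed) {
			work = linked;	/* take the follow-up inline */
			linked = NULL;
		}
		if (linked)
			sk_enqueue(linked);	/* hashed, or still on a hashed chain */
	} while (work);
}

int main(void)
{
	struct sk_work follow = { "linked follow-up", false, NULL, NULL };
	struct sk_work second = { "second hashed item", true, NULL, NULL };
	struct sk_work first  = { "first hashed item", true, &second, &follow };

	handle_work(&first);
	return 0;
}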
645 * If we have work to do, io_acct_run_queue() returns with in io_wq_worker()
708 * running and we have work pending, wake up a free one or create a new one.
740 static bool io_wq_work_match_all(struct io_wq_work *work, void *data) in io_wq_work_match_all() argument
804 schedule_work(&worker->work); in create_worker_cont()
807 static void io_workqueue_create(struct work_struct *work) in io_workqueue_create() argument
809 struct io_worker *worker = container_of(work, struct io_worker, work); in io_workqueue_create()
850 INIT_WORK(&worker->work, io_workqueue_create); in create_io_worker()
851 schedule_work(&worker->work); in create_io_worker()
889 static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq) in io_run_cancel() argument
892 work->flags |= IO_WQ_WORK_CANCEL; in io_run_cancel()
893 wq->do_work(work); in io_run_cancel()
894 work = wq->free_work(work); in io_run_cancel()
895 } while (work); in io_run_cancel()
898 static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work) in io_wq_insert_work() argument
900 struct io_wq_acct *acct = io_work_get_acct(wq, work); in io_wq_insert_work()
904 if (!io_wq_is_hashed(work)) { in io_wq_insert_work()
906 wq_list_add_tail(&work->list, &acct->work_list); in io_wq_insert_work()
910 hash = io_get_work_hash(work); in io_wq_insert_work()
912 wq->hash_tail[hash] = work; in io_wq_insert_work()
916 wq_list_add_after(&work->list, &tail->list, &acct->work_list); in io_wq_insert_work()
919 static bool io_wq_work_match_item(struct io_wq_work *work, void *data) in io_wq_work_match_item() argument
921 return work == data; in io_wq_work_match_item()
924 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) in io_wq_enqueue() argument
926 struct io_wq_acct *acct = io_work_get_acct(wq, work); in io_wq_enqueue()
928 unsigned work_flags = work->flags; in io_wq_enqueue()
936 (work->flags & IO_WQ_WORK_CANCEL)) { in io_wq_enqueue()
937 io_run_cancel(work, wq); in io_wq_enqueue()
942 io_wq_insert_work(wq, work); in io_wq_enqueue()
967 match.data = work, in io_wq_enqueue()
975 * Work items that hash to the same value will not be done in parallel.
978 void io_wq_hash_work(struct io_wq_work *work, void *val) in io_wq_hash_work() argument
983 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)); in io_wq_hash_work()
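io_wq_hash_work() and io_get_work_hash() are two ends of the same encoding: the hash key is packed into the bits of work->flags above IO_WQ_HASH_SHIFT together with the IO_WQ_WORK_HASHED bit, and recovered with a plain shift. A standalone sketch of that packing; the SK_ flag and shift values are assumptions for the sketch rather than quotes from io-wq.h.

#include <stdio.h>
#include <assert.h>

#define SK_WORK_HASHED	2u	/* flag value assumed for the sketch */
#define SK_HASH_SHIFT	24	/* upper bits hold the hash key (assumed) */

static unsigned int hash_work(unsigned int flags, unsigned int bucket)
{
	/* mirrors: work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT)) */
	return flags | SK_WORK_HASHED | (bucket << SK_HASH_SHIFT);
}

static unsigned int work_hash(unsigned int flags)
{
	/* mirrors: return work->flags >> IO_WQ_HASH_SHIFT */
	return flags >> SK_HASH_SHIFT;
}

static int work_is_hashed(unsigned int flags)
{
	return flags & SK_WORK_HASHED;
}

int main(void)
{
	unsigned int flags = hash_work(0, 42);	/* serialize on bucket 42 */

	assert(work_is_hashed(flags));
	printf("bucket = %u\n", work_hash(flags));	/* prints 42 */
	return 0;
}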
988 struct io_wq_work *work) in __io_wq_worker_cancel() argument
990 if (work && match->fn(work, match->data)) { in __io_wq_worker_cancel()
991 work->flags |= IO_WQ_WORK_CANCEL; in __io_wq_worker_cancel()
1005 * may dereference the passed in work. in io_wq_worker_cancel()
1017 struct io_wq_work *work, in io_wq_remove_pending() argument
1020 struct io_wq_acct *acct = io_work_get_acct(wq, work); in io_wq_remove_pending()
1021 unsigned int hash = io_get_work_hash(work); in io_wq_remove_pending()
1024 if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) { in io_wq_remove_pending()
1032 wq_list_del(&acct->work_list, &work->list, prev); in io_wq_remove_pending()
1040 struct io_wq_work *work; in io_acct_cancel_pending_work() local
1044 work = container_of(node, struct io_wq_work, list); in io_acct_cancel_pending_work()
1045 if (!match->fn(work, match->data)) in io_acct_cancel_pending_work()
1047 io_wq_remove_pending(wq, work, prev); in io_acct_cancel_pending_work()
1049 io_run_cancel(work, wq); in io_acct_cancel_pending_work()
1094 * from there. CANCEL_OK means that the work is returned as-new, in io_wq_cancel_cb()
1097 * Then check if a free (going busy) or busy worker has the work in io_wq_cancel_cb()
1103 * we'll find a work item regardless of state. in io_wq_cancel_cb()
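The cancellation hits, from __io_wq_worker_cancel() through io_wq_cancel_cb(), all revolve around one callback pattern: the caller supplies a match function plus opaque data, matching pending items are unlinked and run through the cancel path, and a matching item already assigned to a worker only has IO_WQ_WORK_CANCEL set so the worker completes it as cancelled. A compressed sketch of that pattern; the list handling and the sk_ names are invented here, and the kernel additionally repairs hash_tail and reports CANCEL_OK / CANCEL_RUNNING / CANCEL_NOTFOUND, which the sketch leaves out.

#include <stdbool.h>
#include <stdio.h>

#define SK_WORK_CANCEL	1u	/* flag value assumed for the sketch */

struct sk_work {
	int		tag;
	unsigned int	flags;
	struct sk_work	*next;
};

struct sk_match {
	bool	(*fn)(struct sk_work *w, void *data);
	void	*data;
	int	nr_pending;	/* matches pulled off the pending list */
	int	nr_running;	/* matches flagged while running */
};

static bool match_tag(struct sk_work *w, void *data)
{
	return w->tag == *(int *)data;
}

/* pending items that match are unlinked and sent down the cancel path */
static void cancel_pending(struct sk_work **head, struct sk_match *m)
{
	struct sk_work **pp = head;

	while (*pp) {
		struct sk_work *w = *pp;

		if (m->fn(w, m->data)) {
			*pp = w->next;			/* unlink from pending */
			w->flags |= SK_WORK_CANCEL;	/* then run it as cancelled */
			m->nr_pending++;
		} else {
			pp = &w->next;
		}
	}
}

/* an item already assigned to a worker only gets its cancel flag set */
static void cancel_running(struct sk_work *cur, struct sk_match *m)
{
	if (cur && m->fn(cur, m->data)) {
		cur->flags |= SK_WORK_CANCEL;
		m->nr_running++;
	}
}

int main(void)
{
	struct sk_work b = { .tag = 2 };
	struct sk_work a = { .tag = 1, .next = &b };
	struct sk_work running = { .tag = 1 };
	struct sk_work *pending = &a;
	int want = 1;
	struct sk_match m = { .fn = match_tag, .data = &want };

	cancel_pending(&pending, &m);
	cancel_running(&running, &m);
	printf("pending cancelled: %d, running flagged: %d\n",
	       m.nr_pending, m.nr_running);	/* prints 1, 1 */
	return 0;
}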