Lines Matching full:work
18 * This is the generic async execution mechanism. Work items are
21 * normal work items and the other for high priority ones) and some extra
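The two lines above come from the file's opening description: work items are deferred functions that run later in process context on shared per-CPU worker pools. As a minimal, hypothetical usage sketch (the demo_* names are invented; DECLARE_WORK and schedule_work are standard workqueue API not shown in this listing):

#include <linux/workqueue.h>
#include <linux/printk.h>

/* Hypothetical deferred function; runs in process context on a kworker. */
static void demo_fn(struct work_struct *work)
{
        pr_info("deferred work running in process context\n");
}

static DECLARE_WORK(demo_work, demo_fn);

/* May be called from (almost) any context, including hard IRQ. */
static void demo_kick(void)
{
        schedule_work(&demo_work);      /* queues on the system per-cpu pool */
}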
209 PWQ_STAT_STARTED, /* work items started execution */
210 PWQ_STAT_COMPLETED, /* work items completed execution */
216 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
239 * When pwq->nr_active >= max_active, new work item is queued to
243 * All work items marked with WORK_STRUCT_INACTIVE do not participate
244 * in pwq->nr_active and all work items in pwq->inactive_works are
246 * work items are in pwq->inactive_works. Some of them are ready to
247 * run in pool->worklist or worker->scheduled. Those work items are
249 * not participate in pwq->nr_active. For a non-barrier work item, it
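The nr_active/inactive_works bookkeeping described above is what the max_active argument of alloc_workqueue() controls. A hypothetical sketch, assuming max_active = 1 (demo_* names invented; alloc_workqueue, INIT_WORK and queue_work are regular workqueue API not part of this listing):

#include <linux/workqueue.h>
#include <linux/errno.h>

static void demo_fn(struct work_struct *work) { /* ... */ }

static struct workqueue_struct *demo_wq;
static struct work_struct demo_a, demo_b;

static int demo_init(void)
{
        /* max_active = 1: at most one item of this wq is active per pool */
        demo_wq = alloc_workqueue("demo_limited", 0, 1);
        if (!demo_wq)
                return -ENOMEM;

        INIT_WORK(&demo_a, demo_fn);
        INIT_WORK(&demo_b, demo_fn);

        queue_work(demo_wq, &demo_a);   /* counts toward pwq->nr_active */
        queue_work(demo_wq, &demo_b);   /* parked on pwq->inactive_works until a slot frees up */
        return 0;
}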
282 * The externally visible workqueue. It relays the issued work items to
290 int work_color; /* WQ: current work color */
354 * Per-cpu work items which run for longer than the following threshold are
356 * management to prevent them from noticeably delaying other per-cpu work items.
381 /* PL&A: allowable cpus for unbound wqs and work items */
393 /* CPU where unbound work was last round robin scheduled from this CPU */
397 * Local execution of unbound work items is no longer guaranteed. The
398 * following always forces round-robin CPU selection on unbound work items
527 struct work_struct *work = addr; in work_is_static_object() local
529 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
538 struct work_struct *work = addr; in work_fixup_init() local
542 cancel_work_sync(work); in work_fixup_init()
543 debug_object_init(work, &work_debug_descr); in work_fixup_init()
556 struct work_struct *work = addr; in work_fixup_free() local
560 cancel_work_sync(work); in work_fixup_free()
561 debug_object_free(work, &work_debug_descr); in work_fixup_free()
576 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
578 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
581 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
583 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
586 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
589 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
591 debug_object_init(work, &work_debug_descr); in __init_work()
595 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
597 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
601 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
603 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
604 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
609 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
610 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
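The __init_work()/destroy_work_on_stack() pair above exists so that work items living on the stack can be tracked by debugobjects. A hypothetical caller-side sketch (demo_* names invented; INIT_WORK_ONSTACK, flush_work and destroy_work_on_stack appear elsewhere in this listing):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_ctx {
        struct work_struct work;
        int result;
};

static void demo_fn(struct work_struct *work)
{
        struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

        ctx->result = 42;               /* placeholder for the deferred computation */
}

static int demo_run_once(void)
{
        struct demo_ctx ctx;

        INIT_WORK_ONSTACK(&ctx.work, demo_fn);  /* pairs with __init_work(..., onstack) */
        schedule_work(&ctx.work);
        flush_work(&ctx.work);                  /* the stack frame must outlive execution */
        destroy_work_on_stack(&ctx.work);       /* lets debugobjects forget the object */
        return ctx.result;
}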
652 * While queued, %WORK_STRUCT_PWQ is set and non-flag bits of a work's data
658 * work->data. These functions should only be called while the work is
662 * corresponding to a work. Pool is available once the work has been
664 * available only while the work item is queued.
666 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
667 * canceled. While being canceled, a work item may have its PENDING set
671 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
674 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
675 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
678 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
681 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
685 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
688 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
692 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
697 * test_and_set_bit(PENDING) and ensures all updates to @work made in set_work_pool_and_clear_pending()
702 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
706 * work->current_func, which is executed afterwards. This possible in set_work_pool_and_clear_pending()
708 * the same @work. E.g. consider this case: in set_work_pool_and_clear_pending()
719 * 7 work->current_func() { in set_work_pool_and_clear_pending()
726 * a @work is not queued in the hope that CPU#1 will eventually in set_work_pool_and_clear_pending()
727 * finish the queued @work. Meanwhile CPU#1 does not see in set_work_pool_and_clear_pending()
734 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
737 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
745 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
747 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
756 * get_work_pool - return the worker_pool a given work was associated with
757 * @work: the work item of interest
768 * Return: The worker_pool @work was last associated with. %NULL if none.
770 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
772 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
788 * get_work_pool_id - return the worker pool ID a given work is associated with
789 * @work: the work item of interest
791 * Return: The worker_pool ID @work was last associated with.
794 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
796 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
804 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
806 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
809 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
812 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
814 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
978 * find_worker_executing_work - find worker which is executing a work
980 * @work: work to find worker for
982 * Find a worker which is executing @work on @pool by searching
983 * @pool->busy_hash which is keyed by the address of @work. For a worker
984 * to match, its current execution should match the address of @work and
985 * its work function. This is to avoid unwanted dependency between
986 * unrelated work executions through a work item being recycled while still
989 * This is a bit tricky. A work item may be freed once its execution
991 * another work item. If the same work item address ends up being reused
993 * recycled work item as currently executing and make it wait until the
996 * This function checks the work item address and work function to avoid
998 * work function which can introduce dependency onto itself through a
999 * recycled work item. Well, if somebody wants to shoot oneself in the
1001 * actually occurs, it should be easy to locate the culprit work function.
1007 * Pointer to worker which is executing @work if found, %NULL
1011 struct work_struct *work) in find_worker_executing_work() argument
1016 (unsigned long)work) in find_worker_executing_work()
1017 if (worker->current_work == work && in find_worker_executing_work()
1018 worker->current_func == work->func) in find_worker_executing_work()
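find_worker_executing_work() keying busy_hash on both the item's address and its work function is what gives callers the non-reentrancy guarantee: the same work item is never executed by two workers at once, even when it is requeued while still running. A hypothetical self-requeueing poller that relies on this (demo_* names invented):

#include <linux/workqueue.h>
#include <linux/compiler.h>

static bool demo_keep_polling = true;

static void demo_poll(struct work_struct *work)
{
        /* ... poll the hardware, push results somewhere ... */

        if (READ_ONCE(demo_keep_polling))
                schedule_work(work);    /* requeue ourselves; never runs concurrently with itself */
}

static DECLARE_WORK(demo_poll_work, demo_poll);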
1026 * @work: start of series of works to be scheduled
1027 * @head: target list to append @work to
1030 * Schedule linked works starting from @work to @head. Work series to be
1031 * scheduled starts at @work and includes any consecutive work with
1038 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1047 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1048 list_move_tail(&work->entry, head); in move_linked_works()
1049 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1063 * assign_work - assign a work item and its linked work items to a worker
1064 * @work: work to assign
1068 * Assign @work and its linked work items to @worker. If @work is already being
1071 * If @nextp is not NULL, it's updated to point to the next work of the last
1072 * scheduled work. This allows assign_work() to be nested inside
1075 * Returns %true if @work was successfully assigned to @worker. %false if @work
1078 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1087 * A single work shouldn't be executed concurrently by multiple workers. in assign_work()
1088 * __queue_work() ensures that @work doesn't jump to a different pool in assign_work()
1090 * @work is not executed concurrently by multiple workers from the same in assign_work()
1091 * pool. Check whether anyone is already processing the work. If so, in assign_work()
1092 * defer the work to the currently executing one. in assign_work()
1094 collision = find_worker_executing_work(pool, work); in assign_work()
1096 move_linked_works(work, &collision->scheduled, nextp); in assign_work()
1100 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1108 * @pool may have pending work items. Wake up worker if necessary. Returns
1125 * Idle @worker is about to execute @work and waking up provides an in kick_pool()
1142 struct work_struct *work = list_first_entry(&pool->worklist, in kick_pool() local
1145 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; in kick_pool()
1155 * Concurrency-managed per-cpu work items that hog CPU for longer than
1157 * which prevents them from stalling other concurrency-managed work items. If a
1158 * work function keeps triggering this mechanism, it's likely that the work item
1161 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1163 * workqueues as appropriate. To avoid flooding the console, each violating work
1269 * CPU intensive auto-detection cares about how long a work item hogged in wq_worker_running()
1347 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. in wq_worker_tick()
1374 * wq_worker_last_func - retrieve worker's last work function
1375 * @task: Task to retrieve last work function of.
1394 * The last work function %current executed as a worker, NULL if it
1395 * hasn't executed any work yet.
1456 static void pwq_activate_inactive_work(struct work_struct *work) in pwq_activate_inactive_work() argument
1458 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work()
1460 trace_workqueue_activate_work(work); in pwq_activate_inactive_work()
1463 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1464 __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work)); in pwq_activate_inactive_work()
1470 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive() local
1473 pwq_activate_inactive_work(work); in pwq_activate_first_inactive()
1479 * @work_data: work_data of work which left the queue
1481 * A work either has completed or is removed from the pending queue,
1524 * try_to_grab_pending - steal work item from worklist and disable irq
1525 * @work: work item to steal
1526 * @is_dwork: @work is a delayed_work
1529 * Try to grab PENDING bit of @work. This function can handle @work in any
1535 * 1 if @work was pending and we successfully stole PENDING
1536 * 0 if @work was idle and we claimed PENDING
1538 * -ENOENT if someone else is canceling @work, this state may persist
1543 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1544 * interrupted while holding PENDING and @work off queue, irq must be
1553 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1563 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1575 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1583 pool = get_work_pool(work); in try_to_grab_pending()
1589 * work->data is guaranteed to point to pwq only while the work in try_to_grab_pending()
1590 * item is queued on pwq->wq, and both updating work->data to point in try_to_grab_pending()
1592 * pwq->pool->lock. This in turn guarantees that, if work->data in try_to_grab_pending()
1593 * points to pwq which is associated with a locked pool, the work in try_to_grab_pending()
1596 pwq = get_work_pwq(work); in try_to_grab_pending()
1598 debug_work_deactivate(work); in try_to_grab_pending()
1601 * A cancelable inactive work item must be in the in try_to_grab_pending()
1605 * An inactive work item cannot be grabbed directly because in try_to_grab_pending()
1606 * it might have linked barrier work items which, if left in try_to_grab_pending()
1608 * management later on and cause stall. Make sure the work in try_to_grab_pending()
1611 if (*work_data_bits(work) & WORK_STRUCT_INACTIVE) in try_to_grab_pending()
1612 pwq_activate_inactive_work(work); in try_to_grab_pending()
1614 list_del_init(&work->entry); in try_to_grab_pending()
1615 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1617 /* work->data points to pwq iff queued, point to pool */ in try_to_grab_pending()
1618 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1628 if (work_is_canceling(work)) in try_to_grab_pending()
1635 * insert_work - insert a work into a pool
1636 * @pwq: pwq @work belongs to
1637 * @work: work to insert
1641 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1647 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1650 debug_work_activate(work); in insert_work()
1652 /* record the work call stack in order to print it in KASAN reports */ in insert_work()
1653 kasan_record_aux_stack_noalloc(work); in insert_work()
1655 /* we own @work, set data and link */ in insert_work()
1656 set_work_pwq(work, pwq, extra_flags); in insert_work()
1657 list_add_tail(&work->entry, head); in insert_work()
1662 * Test whether @work is being queued from another work executing on the
1671 * Return %true iff I'm a worker executing a work item on @wq. If in is_chained_work()
1678 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1706 struct work_struct *work) in __queue_work() argument
1714 * While a work item is PENDING && off queue, a task trying to in __queue_work()
1725 * queues a new work item to a wq after destroy_workqueue(wq). in __queue_work()
1732 /* pwq which will be used unless @work is executing elsewhere */ in __queue_work()
1744 * If @work was previously on a different pool, it might still be in __queue_work()
1745 * running there, in which case the work needs to be queued on that in __queue_work()
1748 last_pool = get_work_pool(work); in __queue_work()
1754 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1773 * another pwq replacing it in cpu_pwq or while work items are executing in __queue_work()
1788 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1790 if (WARN_ON(!list_empty(&work->entry))) in __queue_work()
1800 trace_workqueue_activate_work(work); in __queue_work()
1802 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
1806 insert_work(pwq, work, &pwq->inactive_works, work_flags); in __queue_work()
1815 * queue_work_on - queue work on specific cpu
1816 * @cpu: CPU number to execute work on
1818 * @work: work to queue
1820 * We queue the work to a specific CPU, the caller must ensure it
1826 * Return: %false if @work was already on a queue, %true otherwise.
1829 struct work_struct *work) in queue_work_on() argument
1836 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1837 __queue_work(cpu, wq, work); in queue_work_on()
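From the caller's side, queue_work_on() pins the execution CPU; as the comment above notes, the caller must ensure that CPU cannot go offline underneath it. A hypothetical sketch (demo_* names invented; DECLARE_WORK and system_wq are standard kernel API outside this listing):

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work) { /* ... */ }
static DECLARE_WORK(demo_percpu_work, demo_fn);

/*
 * Run demo_fn on @cpu's per-cpu pool.  Returns false if the item was
 * already pending somewhere (the PENDING bit test shown above).
 */
static bool demo_queue_on(int cpu)
{
        return queue_work_on(cpu, system_wq, &demo_percpu_work);
}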
1853 * available CPU if we need to schedule this work.
1876 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1877 * @node: NUMA node that we are targeting the work for
1879 * @work: work to queue
1881 * We queue the work to a "random" CPU within a given NUMA node. The basic
1882 * idea here is to provide a way to somehow associate work with a given
1893 * Return: %false if @work was already on a queue, %true otherwise.
1896 struct work_struct *work) in queue_work_node() argument
1914 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_node()
1917 __queue_work(cpu, wq, work); in queue_work_node()
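queue_work_node() is the NUMA-affine variant for unbound workqueues: it only chooses a starting CPU, it does not pin execution. A hypothetical sketch (demo_* names invented; dev_to_node() and system_unbound_wq are standard kernel API not shown in this listing):

#include <linux/workqueue.h>
#include <linux/device.h>

static void demo_fn(struct work_struct *work) { /* ... */ }
static DECLARE_WORK(demo_numa_work, demo_fn);

/* Prefer a CPU on the device's NUMA node; only valid for unbound workqueues. */
static bool demo_queue_near(struct device *dev)
{
        return queue_work_node(dev_to_node(dev), system_unbound_wq, &demo_numa_work);
}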
1931 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1939 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1944 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1947 * If @delay is 0, queue @dwork->work immediately. This is for in __queue_delayed_work()
1953 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1968 * queue_delayed_work_on - queue work on specific CPU after delay
1969 * @cpu: CPU number to execute work on
1971 * @dwork: work to queue
1974 * Return: %false if @work was already on a queue, %true otherwise. If
1981 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1988 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
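A delayed work item is just a work_struct plus a timer; the timer callback (delayed_work_timer_fn() above) does the actual queueing once the delay expires. A hypothetical sketch (demo_* names invented; DECLARE_DELAYED_WORK, queue_delayed_work, msecs_to_jiffies and system_wq are regular kernel API not shown here):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_timeout_fn(struct work_struct *work) { /* ... */ }
static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout_fn);

static void demo_arm_timeout(void)
{
        /* Timer fires after ~2s, then the item is queued like a normal work. */
        queue_delayed_work(system_wq, &demo_dwork, msecs_to_jiffies(2000));
}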
1999 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2000 * @cpu: CPU number to execute work on
2002 * @dwork: work to queue
2007 * zero, @work is guaranteed to be scheduled immediately regardless of its
2023 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
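mod_delayed_work_on() first tries to steal the PENDING bit with try_to_grab_pending() (shown above), so an already-queued instance is simply re-armed rather than duplicated, which makes it handy for debouncing. A hypothetical sketch (demo_* names invented; mod_delayed_work and msecs_to_jiffies are the regular API):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_writeback_fn(struct work_struct *work) { /* flush dirty state */ }
static DECLARE_DELAYED_WORK(demo_writeback, demo_writeback_fn);

/* Each event pushes execution out by another 100ms; only one instance stays pending. */
static void demo_note_event(void)
{
        mod_delayed_work(system_wq, &demo_writeback, msecs_to_jiffies(100));
}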
2042 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
2047 * queue_rcu_work - queue work after an RCU grace period
2049 * @rwork: work to queue
2058 struct work_struct *work = &rwork->work; in queue_rcu_work() local
2060 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_rcu_work()
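queue_rcu_work() defers the queueing itself until after an RCU grace period (rcu_work_rcufn() above), which is convenient for releasing objects that RCU readers may still reference but whose teardown needs process context. A hypothetical sketch (demo_* names invented; INIT_RCU_WORK, to_rcu_work, kfree and system_wq are standard kernel API not shown in this listing):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_obj {
        struct rcu_work rwork;
        /* ... fields RCU readers may still be touching ... */
};

static void demo_free_fn(struct work_struct *work)
{
        struct demo_obj *obj = container_of(to_rcu_work(work), struct demo_obj, rwork);

        kfree(obj);     /* safe: a full grace period has elapsed */
}

static void demo_retire(struct demo_obj *obj)
{
        INIT_RCU_WORK(&obj->rwork, demo_free_fn);
        queue_rcu_work(system_wq, &obj->rwork);
}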
2344 * @work: the pool's work for handling these idle workers
2351 * context, hence the split between timer callback and work item.
2353 static void idle_cull_fn(struct work_struct *work) in idle_cull_fn() argument
2355 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); in idle_cull_fn()
2387 static void send_mayday(struct work_struct *work) in send_mayday() argument
2389 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
2414 struct work_struct *work; in pool_mayday_timeout() local
2426 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
2427 send_mayday(work); in pool_mayday_timeout()
2526 * process_one_work - process single work
2528 * @work: work to process
2530 * Process @work. This function contains all the logic necessary to
2531 * process a single work including synchronization against and
2534 * call this function to process a work.
2539 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2543 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2552 * work->lockdep_map, make a copy and use that here. in process_one_work()
2556 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2563 debug_work_deactivate(work); in process_one_work()
2564 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2565 worker->current_work = work; in process_one_work()
2566 worker->current_func = work->func; in process_one_work()
2569 work_data = *work_data_bits(work); in process_one_work()
2578 list_del_init(&work->entry); in process_one_work()
2584 * execution of the pending work items. in process_one_work()
2592 * chain execution of the pending work items for WORKER_NOT_RUNNING in process_one_work()
2599 * update to @work. Also, do this inside @pool->lock so that in process_one_work()
2603 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2623 * read-recursive acquire on the work(queue) 'locks', but this will then in process_one_work()
2632 trace_workqueue_execute_start(work); in process_one_work()
2633 worker->current_func(work); in process_one_work()
2635 * While we must be careful to not use "work" after this, the trace in process_one_work()
2638 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
2654 * kernels, where a requeueing work item waiting for something to in process_one_work()
2655 * happen could deadlock with stop_machine as such work item could in process_one_work()
2666 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than in process_one_work()
2688 * may change while processing a work, so this function repeatedly
2689 * fetches a work from the top and executes it.
2697 struct work_struct *work; in process_scheduled_works() local
2700 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
2706 process_one_work(worker, work); in process_scheduled_works()
2726 * work items regardless of their specific target workqueue. The only
2727 * exception is work items which belong to workqueues with a rescuer which
2767 * preparing to process a work or actually processing it. in worker_thread()
2782 struct work_struct *work = in worker_thread() local
2786 if (assign_work(work, worker, NULL)) in worker_thread()
2793 * pool->lock is held and there's no work to process and no need to in worker_thread()
2813 * Regular work processing on a pool may block trying to create a new
2845 * shouldn't have any work pending, but @wq->maydays may still have in rescuer_thread()
2847 * all the work items before the rescuer got to them. Go through in rescuer_thread()
2860 struct work_struct *work, *n; in rescuer_thread() local
2876 list_for_each_entry_safe(work, n, &pool->worklist, entry) { in rescuer_thread()
2877 if (get_work_pwq(work) == pwq && in rescuer_thread()
2878 assign_work(work, rescuer, &n)) in rescuer_thread()
2886 * The above execution of rescued work items could in rescuer_thread()
2890 * that such back-to-back work items, which may be in rescuer_thread()
2944 * @target_work: work item being flushed (NULL for workqueue flushes)
2974 struct work_struct work; member
2979 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2981 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2986 * insert_wq_barrier - insert a barrier work
2989 * @target: target work to attach @barr to
2998 * try_to_grab_pending() can't determine whether the work to be
3000 * flag of the previous work while there must be a valid next work
3001 * after a work with LINKED flag set.
3023 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
3024 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
3030 /* The barrier work item does not participate in pwq->nr_active. */ in insert_wq_barrier()
3053 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
3060 * @work_color: new work color, < 0 for no-op
3128 * __flush_workqueue - ensure that any scheduled work has run to completion.
3131 * This function sleeps until all work items which were queued on entry
3291 * work items on @wq can queue further work items on it. @wq is flushed
3340 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, in start_flush_work() argument
3350 pool = get_work_pool(work); in start_flush_work()
3358 pwq = get_work_pwq(work); in start_flush_work()
3363 worker = find_worker_executing_work(pool, work); in start_flush_work()
3369 check_flush_dependency(pwq->wq, work); in start_flush_work()
3371 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3378 * For single threaded workqueues the deadlock happens when the work in start_flush_work()
3379 * is after the work issuing the flush_work(). For rescuer equipped in start_flush_work()
3396 static bool __flush_work(struct work_struct *work, bool from_cancel) in __flush_work() argument
3403 if (WARN_ON(!work->func)) in __flush_work()
3406 lock_map_acquire(&work->lockdep_map); in __flush_work()
3407 lock_map_release(&work->lockdep_map); in __flush_work()
3409 if (start_flush_work(work, &barr, from_cancel)) { in __flush_work()
3411 destroy_work_on_stack(&barr.work); in __flush_work()
3419 * flush_work - wait for a work to finish executing the last queueing instance
3420 * @work: the work to flush
3422 * Wait until @work has finished execution. @work is guaranteed to be idle
3426 * %true if flush_work() waited for the work to finish execution,
3429 bool flush_work(struct work_struct *work) in flush_work() argument
3431 return __flush_work(work, false); in flush_work()
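flush_work() works by inserting a wq_barrier behind the target (insert_wq_barrier() above) and sleeping on its completion, so on return the last queueing instance has finished. A hypothetical teardown path that relies on this (the demo_ui structure and names are invented; kfree is standard kernel API):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_ui {
        struct work_struct refresh_work;
        /* ... state touched by the work function ... */
};

/* Assumes nothing can queue refresh_work again past this point. */
static void demo_ui_teardown(struct demo_ui *ui)
{
        flush_work(&ui->refresh_work);  /* wait out the last queued instance */
        kfree(ui);                      /* nothing can still be using *ui */
}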
3437 struct work_struct *work; member
3444 if (cwait->work != key) in cwt_wakefn()
3449 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
3456 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
3459 * finish. flush_work() doesn't work for PREEMPT_NONE in __cancel_work_timer()
3460 * because we may get scheduled between @work's completion in __cancel_work_timer()
3463 * as @work is no longer busy, try_to_grab_pending() will in __cancel_work_timer()
3464 * return -ENOENT as @work is still being canceled and the in __cancel_work_timer()
3470 * wake function which matches @work along with exclusive in __cancel_work_timer()
3478 cwait.work = work; in __cancel_work_timer()
3482 if (work_is_canceling(work)) in __cancel_work_timer()
3488 /* tell other tasks trying to grab @work to back off */ in __cancel_work_timer()
3489 mark_work_canceling(work); in __cancel_work_timer()
3493 * This allows canceling during early boot. We know that @work in __cancel_work_timer()
3497 __flush_work(work, true); in __cancel_work_timer()
3499 clear_work_data(work); in __cancel_work_timer()
3508 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
3514 * cancel_work_sync - cancel a work and wait for it to finish
3515 * @work: the work to cancel
3517 * Cancel @work and wait for its execution to finish. This function
3518 * can be used even if the work re-queues itself or migrates to
3519 * another workqueue. On return from this function, @work is
3522 * cancel_work_sync(&delayed_work->work) must not be used for
3525 * The caller must ensure that the workqueue on which @work was last
3529 * %true if @work was pending, %false otherwise.
3531 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
3533 return __cancel_work_timer(work, false); in cancel_work_sync()
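cancel_work_sync() both clears a pending instance and waits for a running one, and per the comment above it copes with the item re-queueing itself; it is the usual last call on a work item before the resources its function uses go away. A hypothetical driver-remove sketch (demo_nic and its fields are invented; iounmap/kfree are standard kernel API):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/io.h>

struct demo_nic {
        struct work_struct reset_work;
        void __iomem *regs;
};

static void demo_nic_remove(struct demo_nic *nic)
{
        cancel_work_sync(&nic->reset_work);     /* no instance pending or running after this */
        iounmap(nic->regs);
        kfree(nic);
}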
3539 * @dwork: the delayed work to flush
3541 * Delayed timer is cancelled and the pending work is queued for
3546 * %true if flush_work() waited for the work to finish execution,
3553 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
3555 return flush_work(&dwork->work); in flush_delayed_work()
3561 * @rwork: the rcu work to flush
3564 * %true if flush_rcu_work() waited for the work to finish execution,
3569 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { in flush_rcu_work()
3571 flush_work(&rwork->work); in flush_rcu_work()
3574 return flush_work(&rwork->work); in flush_rcu_work()
3579 static bool __cancel_work(struct work_struct *work, bool is_dwork) in __cancel_work() argument
3585 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work()
3591 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); in __cancel_work()
3599 bool cancel_work(struct work_struct *work) in cancel_work() argument
3601 return __cancel_work(work, false); in cancel_work()
3606 * cancel_delayed_work - cancel a delayed work
3615 * The work callback function may still be running on return, unless
3616 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
3623 return __cancel_work(&dwork->work, true); in cancel_delayed_work()
3628 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3629 * @dwork: the delayed work to cancel
3638 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
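The difference between the two cancel variants above matters for teardown: cancel_delayed_work() only kills the timer/pending state and may return while the callback is still running, while cancel_delayed_work_sync() also waits and may sleep. A hypothetical sketch (demo_* names invented):

#include <linux/workqueue.h>

static void demo_poll_fn(struct work_struct *work) { /* ... */ }
static DECLARE_DELAYED_WORK(demo_poll, demo_poll_fn);

static void demo_pause(void)
{
        cancel_delayed_work(&demo_poll);        /* ok from atomic context; fn may still be running */
}

static void demo_shutdown(void)
{
        cancel_delayed_work_sync(&demo_poll);   /* may sleep; fn has finished on return */
}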
3665 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
3667 INIT_WORK(work, func); in schedule_on_each_cpu()
3668 schedule_work_on(cpu, work); in schedule_on_each_cpu()
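schedule_on_each_cpu() queues one per-cpu work item per online CPU (the INIT_WORK/schedule_work_on loop above) and only returns once the function has finished everywhere. A hypothetical sketch (demo_* names invented):

#include <linux/workqueue.h>

static void demo_drain_fn(struct work_struct *work)
{
        /* runs once on each online CPU, in process context */
}

static int demo_drain_all_cpus(void)
{
        return schedule_on_each_cpu(demo_drain_fn);     /* 0 on success; may sleep */
}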
3682 * @ew: guaranteed storage for the execute work structure (must
3683 * be available when the work executes)
3694 fn(&ew->work); in execute_in_process_context()
3698 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3699 schedule_work(&ew->work); in execute_in_process_context()
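execute_in_process_context() either calls the function directly (when the caller is already in process context) or defers it through schedule_work(&ew->work) as shown above; the execute_work storage must therefore stay valid until the work has run. A hypothetical sketch (demo_* names invented):

#include <linux/workqueue.h>

static struct execute_work demo_ew;     /* must outlive a possible deferred run */

static void demo_fn(struct work_struct *work)
{
        /* needs process context, e.g. it may sleep */
}

static void demo_maybe_defer(void)
{
        /* Returns 0 if executed inline, 1 if it was scheduled for later. */
        execute_in_process_context(demo_fn, &demo_ew);
}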
3990 * which implies no work queued to the pool, which implies no worker can in put_unbound_pool()
4108 static void pwq_release_workfn(struct kthread_work *work) in pwq_release_workfn() argument
4110 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_release_workfn()
4150 * workqueue's saved_max_active and activate inactive work items
4452 * work items are affine to the pod it was issued on. Older pwqs are released as
4453 * in-flight work items finish. Note that a work item which repeatedly requeues
4493 * executing the work items for the workqueue will lose their CPU affinity and
4496 * responsibility to flush the work item from CPU_DOWN_PREPARE.
4774 * Safely destroy a workqueue. All work currently pending will be done first.
4891 * current_work - retrieve %current task's work struct
4896 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4910 * work functions to determine whether it's being run off the rescuer task.
4962 * work_busy - test whether a work is currently pending or running
4963 * @work: the work to be tested
4965 * Test whether @work is currently pending or running. There is no
4972 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4978 if (work_pending(work)) in work_busy()
4982 pool = get_work_pool(work); in work_busy()
4985 if (find_worker_executing_work(pool, work)) in work_busy()
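work_busy() gives only an unsynchronized snapshot, as its comment above notes, so it is suited to diagnostics rather than decisions. A hypothetical status helper (demo_* names invented; the WORK_BUSY_* flags are part of the public workqueue header):

#include <linux/workqueue.h>

static struct work_struct demo_work;

static const char *demo_work_state(void)
{
        unsigned int busy = work_busy(&demo_work);      /* racy snapshot, fine for debugfs */

        if (busy & WORK_BUSY_RUNNING)
                return "running";
        if (busy & WORK_BUSY_PENDING)
                return "pending";
        return "idle";
}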
4996 * set_worker_desc - set description for the current work item
5000 * This function can be called by a running work function to describe what
5001 * the work item is about. If the worker task gets dumped, this
5023 * If @task is a worker and currently executing a work item, print out the
5025 * set_worker_desc() by the currently executing work item.
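set_worker_desc() lets a running work function label its kworker so that a later task dump prints the description (lines 5023-5025 above). A hypothetical sketch (demo_* names and the format string are invented):

#include <linux/workqueue.h>

static void demo_flush_fn(struct work_struct *work)
{
        /* Shows up in task dumps of this kworker while demo_flush_fn runs. */
        set_worker_desc("demo-flush bdev %d:%d", 8, 0);

        /* ... long-running flush ... */
}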
5102 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) in pr_cont_work() argument
5104 if (work->func == wq_barrier_func) { in pr_cont_work()
5107 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
5115 pr_cont_work_flush(comma, work->func, pcwsp); in pr_cont_work()
5123 struct work_struct *work; in show_pwq() local
5153 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
5154 pr_cont_work(false, work, &pcws); in show_pwq()
5161 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
5162 if (get_work_pwq(work) == pwq) { in show_pwq()
5171 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
5172 if (get_work_pwq(work) != pwq) in show_pwq()
5175 pr_cont_work(comma, work, &pcws); in show_pwq()
5176 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
5186 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
5187 pr_cont_work(comma, work, &pcws); in show_pwq()
5188 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
5221 * drivers that queue work while holding locks in show_one_workqueue()
5254 /* How long the first pending work is waiting for a worker. */ in show_one_worker_pool()
5260 * queue work while holding locks also taken in their write in show_one_worker_pool()
5380 * are a lot of assumptions on strong associations among work, pwq and
5426 * unbound chain execution of currently pending work items. in unbind_workers()
5601 struct work_struct work; member
5607 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
5609 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
5631 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); in work_on_cpu_key()
5632 schedule_work_on(cpu, &wfc.work); in work_on_cpu_key()
5633 flush_work(&wfc.work); in work_on_cpu_key()
5634 destroy_work_on_stack(&wfc.work); in work_on_cpu_key()
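work_on_cpu_key() above is the synchronous building block behind work_on_cpu() on SMP kernels: an on-stack work item is queued to the target CPU, flushed, and its return value handed back. A hypothetical caller (demo_* names invented; work_on_cpu is the public wrapper, not shown in this listing):

#include <linux/workqueue.h>

static long demo_probe_fn(void *arg)
{
        /* Runs on the target CPU, in a kworker's process context. */
        return 0;
}

static long demo_probe_cpu(int cpu)
{
        return work_on_cpu(cpu, demo_probe_fn, NULL);   /* sleeps until demo_probe_fn returns */
}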
5912 * max_active RW int : maximum number of in-flight work items
6377 * flush dependency, a concurrency-managed work item which stays RUNNING
6399 * Show workers that might prevent the processing of pending work items.
6401 * Pending work items should be handled by another idle worker
6416 * drivers that queue work while holding locks in show_cpu_pool_hog()
6600 * boot code to create workqueues and queue/cancel work items. Actual work item
6743 * been created and work items queued on them, but there are no kworkers
6744 * executing the work items yet. Populate the worker pools with the initial