Lines Matching full:work

18  * This is the generic async execution mechanism.  Work items are
21 * normal work items and the other for high priority ones) and some extra
242 PWQ_STAT_STARTED, /* work items started execution */
243 PWQ_STAT_COMPLETED, /* work items completed execution */
249 PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
273 * When pwq->nr_active >= max_active, new work item is queued to
277 * All work items marked with WORK_STRUCT_INACTIVE do not participate in
278 * nr_active and all work items in pwq->inactive_works are marked with
279 * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
281 * pool->worklist or worker->scheduled. Those work items are only struct
283 * in nr_active. For a non-barrier work item, it is marked with
323 * to start executing a work item, it should increment ->nr using
325 * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
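
A minimal sketch of how the max_active limit looks from the caller's side (module-style code, all names hypothetical): with max_active = 1, only one queued item is active at a time and the rest start out inactive as described above, becoming active as earlier items finish.

        #include <linux/module.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *my_wq;          /* hypothetical */
        static struct work_struct my_works[4];

        static void my_work_fn(struct work_struct *work)
        {
                pr_info("work %d ran\n", (int)(work - my_works));
        }

        static int __init my_init(void)
        {
                int i;

                my_wq = alloc_workqueue("my_wq", 0, 1);         /* max_active = 1 */
                if (!my_wq)
                        return -ENOMEM;

                for (i = 0; i < 4; i++) {
                        INIT_WORK(&my_works[i], my_work_fn);
                        queue_work(my_wq, &my_works[i]);        /* items 1..3 start out inactive */
                }
                return 0;
        }

        static void __exit my_exit(void)
        {
                destroy_workqueue(my_wq);       /* waits for pending items first */
        }

        module_init(my_init);
        module_exit(my_exit);
        MODULE_LICENSE("GPL");
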
337 * The externally visible workqueue. It relays the issued work items to
345 int work_color; /* WQ: current work color */
417 * Per-cpu work items which run for longer than the following threshold are
419 * management to prevent them from noticeably delaying other per-cpu work items.
457 /* PL&A: allowable cpus for unbound wqs and work items */
469 /* CPU where unbound work was last round robin scheduled from this CPU */
473 * Local execution of unbound work items is no longer guaranteed. The
474 * following always forces round-robin CPU selection on unbound work items
616 struct work_struct *work = addr; in work_is_static_object() local
618 return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work)); in work_is_static_object()
627 struct work_struct *work = addr; in work_fixup_init() local
631 cancel_work_sync(work); in work_fixup_init()
632 debug_object_init(work, &work_debug_descr); in work_fixup_init()
645 struct work_struct *work = addr; in work_fixup_free() local
649 cancel_work_sync(work); in work_fixup_free()
650 debug_object_free(work, &work_debug_descr); in work_fixup_free()
665 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
667 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
670 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
672 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
675 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
678 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
680 debug_object_init(work, &work_debug_descr); in __init_work()
684 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
686 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
690 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
692 timer_destroy_on_stack(&work->timer); in destroy_delayed_work_on_stack()
693 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
698 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
699 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
776 * While queued, %WORK_STRUCT_PWQ is set and non-flag bits of a work's data
781 * can be used to set the pwq, pool or clear work->data. These functions should
782 * only be called while the work is owned - ie. while the PENDING bit is set.
785 * corresponding to a work. Pool is available once the work has been
787 * available only while the work item is queued.
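
The comment above describes packing flag bits into the low bits of an aligned pointer stored in work->data. Illustration only, with made-up masks and helpers rather than the kernel's actual definitions:

        #include <linux/atomic.h>
        #include <linux/types.h>

        #define MY_FLAG_PENDING         (1UL << 0)      /* hypothetical flag bit */
        #define MY_PTR_MASK             (~0UL << 3)     /* pointer assumed 8-byte aligned */

        static inline void my_set_ptr_and_flags(atomic_long_t *data,
                                                void *ptr, unsigned long flags)
        {
                atomic_long_set(data, (unsigned long)ptr | flags);
        }

        static inline void *my_get_ptr(atomic_long_t *data)
        {
                return (void *)(atomic_long_read(data) & MY_PTR_MASK);
        }

        static inline bool my_test_pending(atomic_long_t *data)
        {
                return atomic_long_read(data) & MY_FLAG_PENDING;
        }
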
789 static inline void set_work_data(struct work_struct *work, unsigned long data) in set_work_data() argument
791 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
792 atomic_long_set(&work->data, data | work_static(work)); in set_work_data()
795 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
798 set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING | in set_work_pwq()
802 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
805 set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | in set_work_pool_and_keep_pending()
809 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
814 * test_and_set_bit(PENDING) and ensures all updates to @work made in set_work_pool_and_clear_pending()
819 set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) | in set_work_pool_and_clear_pending()
824 * work->current_func, which is executed afterwards. This possible in set_work_pool_and_clear_pending()
826 * the same @work. E.g. consider this case: in set_work_pool_and_clear_pending()
837 * 7 work->current_func() { in set_work_pool_and_clear_pending()
844 * a @work is not queued in a hope, that CPU#1 will eventually in set_work_pool_and_clear_pending()
845 * finish the queued @work. Meanwhile CPU#1 does not see in set_work_pool_and_clear_pending()
857 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
859 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
868 * get_work_pool - return the worker_pool a given work was associated with
869 * @work: the work item of interest
880 * Return: The worker_pool @work was last associated with. %NULL if none.
882 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
884 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
1080 * find_worker_executing_work - find worker which is executing a work
1082 * @work: work to find worker for
1084 * Find a worker which is executing @work on @pool by searching
1085 * @pool->busy_hash which is keyed by the address of @work. For a worker
1086 * to match, its current execution should match the address of @work and
1087 * its work function. This is to avoid unwanted dependency between
1088 * unrelated work executions through a work item being recycled while still
1091 * This is a bit tricky. A work item may be freed once its execution
1093 * another work item. If the same work item address ends up being reused
1095 * recycled work item as currently executing and make it wait until the
1098 * This function checks the work item address and work function to avoid
1100 * work function which can introduce dependency onto itself through a
1101 * recycled work item. Well, if somebody wants to shoot oneself in the
1103 * actually occurs, it should be easy to locate the culprit work function.
1109 * Pointer to worker which is executing @work if found, %NULL
1113 struct work_struct *work) in find_worker_executing_work() argument
1118 (unsigned long)work) in find_worker_executing_work()
1119 if (worker->current_work == work && in find_worker_executing_work()
1120 worker->current_func == work->func) in find_worker_executing_work()
1126 static void mayday_cursor_func(struct work_struct *work) in mayday_cursor_func() argument
1134 * @work: start of series of works to be scheduled
1135 * @head: target list to append @work to
1138 * Schedule linked works starting from @work to @head. Work series to be
1139 * scheduled starts at @work and includes any consecutive work with
1146 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1155 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1156 list_move_tail(&work->entry, head); in move_linked_works()
1157 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1171 * assign_work - assign a work item and its linked work items to a worker
1172 * @work: work to assign
1176 * Assign @work and its linked work items to @worker. If @work is already being
1179 * If @nextp is not NULL, it's updated to point to the next work of the last
1180 * scheduled work. This allows assign_work() to be nested inside
1183 * Returns %true if @work was successfully assigned to @worker. %false if @work
1186 static bool assign_work(struct work_struct *work, struct worker *worker, in assign_work() argument
1194 /* The cursor work should not be processed */ in assign_work()
1195 if (unlikely(work->func == mayday_cursor_func)) { in assign_work()
1199 *nextp = list_next_entry(work, entry); in assign_work()
1200 list_del_init(&work->entry); in assign_work()
1205 * A single work shouldn't be executed concurrently by multiple workers. in assign_work()
1206 * __queue_work() ensures that @work doesn't jump to a different pool in assign_work()
1208 * @work is not executed concurrently by multiple workers from the same in assign_work()
1209 * pool. Check whether anyone is already processing the work. If so, in assign_work()
1210 * defer the work to the currently executing one. in assign_work()
1212 collision = find_worker_executing_work(pool, work); in assign_work()
1214 move_linked_works(work, &collision->scheduled, nextp); in assign_work()
1218 move_linked_works(work, &worker->scheduled, nextp); in assign_work()
1249 * @pool may have pending work items. Wake up worker if necessary. Returns
1271 * Idle @worker is about to execute @work and waking up provides an in kick_pool()
1288 struct work_struct *work = list_first_entry(&pool->worklist, in kick_pool() local
1294 get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; in kick_pool()
1305 * Concurrency-managed per-cpu work items that hog CPU for longer than
1307 * which prevents them from stalling other concurrency-managed work items. If a
1308 * work function keeps triggering this mechanism, it's likely that the work item
1311 * wq_cpu_intensive_report() tracks work functions which trigger such conditions
1313 * workqueues as appropriate. To avoid flooding the console, each violating work
1423 * CPU intensive auto-detection cares about how long a work item hogged in wq_worker_running()
1501 * CPU_INTENSIVE to avoid stalling other concurrency-managed work items. in wq_worker_tick()
1528 * wq_worker_last_func - retrieve worker's last work function
1529 * @task: Task to retrieve last work function of.
1548 * The last work function %current executed as a worker, NULL if it
1549 * hasn't executed any work yet.
1693 struct work_struct *work) in __pwq_activate_work() argument
1695 unsigned long *wdb = work_data_bits(work); in __pwq_activate_work()
1698 trace_workqueue_activate_work(work); in __pwq_activate_work()
1701 move_linked_works(work, &pwq->pool->worklist, NULL); in __pwq_activate_work()
1794 * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
1798 * Activate the first inactive work item of @pwq if available and allowed by
1801 * Returns %true if an inactive work item has been activated. %false if no
1802 * inactive work item is found or max_active limit is reached.
1806 struct work_struct *work = in pwq_activate_first_inactive() local
1810 if (work && pwq_tryinc_nr_active(pwq, fill)) { in pwq_activate_first_inactive()
1811 __pwq_activate_work(pwq, work); in pwq_activate_first_inactive()
1824 * ensure proper work item ordering::
1836 * to unplug the next oldest one to start its work item execution. Note that
1871 struct work_struct *work; in node_activate_pending_pwq() local
1900 * $pwq may not have any inactive work items due to e.g. cancellations. in node_activate_pending_pwq()
1903 work = list_first_entry_or_null(&pwq->inactive_works, in node_activate_pending_pwq()
1905 if (!work) { in node_activate_pending_pwq()
1911 * Acquire an nr_active count and activate the inactive work item. If in node_activate_pending_pwq()
1912 * $pwq still has inactive work items, rotate it to the end of the in node_activate_pending_pwq()
1914 * inactive work items are not activated in queueing order which is fine in node_activate_pending_pwq()
1919 __pwq_activate_work(pwq, work); in node_activate_pending_pwq()
1943 * Decrement @pwq's nr_active and try to activate the first inactive work item.
1961 * inactive work item on @pwq itself. in pwq_dec_nr_active()
1992 * @work_data: work_data of work which left the queue
1994 * A work either has completed or is removed from pending queue,
2000 * work item is complete.
2036 * try_to_grab_pending - steal work item from worklist and disable irq
2037 * @work: work item to steal
2041 * Try to grab PENDING bit of @work. This function can handle @work in any
2047 * 1 if @work was pending and we successfully stole PENDING
2048 * 0 if @work was idle and we claimed PENDING
2053 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
2054 * interrupted while holding PENDING and @work off queue, irq must be
2063 static int try_to_grab_pending(struct work_struct *work, u32 cflags, in try_to_grab_pending() argument
2073 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
2085 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
2093 pool = get_work_pool(work); in try_to_grab_pending()
2099 * work->data is guaranteed to point to pwq only while the work in try_to_grab_pending()
2100 * item is queued on pwq->wq, and both updating work->data to point in try_to_grab_pending()
2102 * pwq->pool->lock. This in turn guarantees that, if work->data in try_to_grab_pending()
2103 * points to pwq which is associated with a locked pool, the work in try_to_grab_pending()
2106 pwq = get_work_pwq(work); in try_to_grab_pending()
2108 unsigned long work_data = *work_data_bits(work); in try_to_grab_pending()
2110 debug_work_deactivate(work); in try_to_grab_pending()
2113 * A cancelable inactive work item must be in the in try_to_grab_pending()
2117 * An inactive work item cannot be deleted directly because in try_to_grab_pending()
2118 * it might have linked barrier work items which, if left in try_to_grab_pending()
2121 * barrier work items to the worklist when deleting the grabbed in try_to_grab_pending()
2127 move_linked_works(work, &pwq->pool->worklist, NULL); in try_to_grab_pending()
2129 list_del_init(&work->entry); in try_to_grab_pending()
2132 * work->data points to pwq iff queued. Let's point to pool. As in try_to_grab_pending()
2133 * this destroys work->data needed by the next step, stash it. in try_to_grab_pending()
2135 set_work_pool_and_keep_pending(work, pool->id, in try_to_grab_pending()
2153 * work_grab_pending - steal work item from worklist and disable irq
2154 * @work: work item to steal
2158 * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
2165 * Returns %true if @work was pending. %false if idle.
2167 static bool work_grab_pending(struct work_struct *work, u32 cflags, in work_grab_pending() argument
2173 ret = try_to_grab_pending(work, cflags, irq_flags); in work_grab_pending()
2181 * insert_work - insert a work into a pool
2182 * @pwq: pwq @work belongs to
2183 * @work: work to insert
2187 * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
2193 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
2196 debug_work_activate(work); in insert_work()
2198 /* record the work call stack in order to print it in KASAN reports */ in insert_work()
2199 kasan_record_aux_stack(work); in insert_work()
2201 /* we own @work, set data and link */ in insert_work()
2202 set_work_pwq(work, pwq, extra_flags); in insert_work()
2203 list_add_tail(&work->entry, head); in insert_work()
2208 * Test whether @work is being queued from another work executing on the
2217 * Return %true iff I'm a worker executing a work item on @wq. If in is_chained_work()
2224 * When queueing an unbound work item to a wq, prefer local CPU if allowed
2249 struct work_struct *work) in __queue_work() argument
2257 * While a work item is PENDING && off queue, a task trying to in __queue_work()
2267 * queues a new work item to a wq after destroy_workqueue(wq). in __queue_work()
2271 work->func, wq->name))) { in __queue_work()
2276 /* pwq which will be used unless @work is executing elsewhere */ in __queue_work()
2288 * If @work was previously on a different pool, it might still be in __queue_work()
2289 * running there, in which case the work needs to be queued on that in __queue_work()
2292 * For ordered workqueue, work items must be queued on the newest pwq in __queue_work()
2296 last_pool = get_work_pool(work); in __queue_work()
2302 worker = find_worker_executing_work(last_pool, work); in __queue_work()
2321 * another pwq replacing it in cpu_pwq or while work items are executing in __queue_work()
2336 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
2338 if (WARN_ON(!list_empty(&work->entry))) in __queue_work()
2345 * Limit the number of concurrently active work items to max_active. in __queue_work()
2346 * @work must also queue behind existing inactive work items to maintain in __queue_work()
2353 trace_workqueue_activate_work(work); in __queue_work()
2354 insert_work(pwq, work, &pool->worklist, work_flags); in __queue_work()
2358 insert_work(pwq, work, &pwq->inactive_works, work_flags); in __queue_work()
2366 static bool clear_pending_if_disabled(struct work_struct *work) in clear_pending_if_disabled() argument
2368 unsigned long data = *work_data_bits(work); in clear_pending_if_disabled()
2376 set_work_pool_and_clear_pending(work, offqd.pool_id, in clear_pending_if_disabled()
2382 * queue_work_on - queue work on specific cpu
2383 * @cpu: CPU number to execute work on
2385 * @work: work to queue
2387 * We queue the work to a specific CPU, the caller must ensure it
2393 * Return: %false if @work was already on a queue, %true otherwise.
2396 struct work_struct *work) in queue_work_on() argument
2403 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_work_on()
2404 !clear_pending_if_disabled(work)) { in queue_work_on()
2405 __queue_work(cpu, wq, work); in queue_work_on()
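
A hedged usage sketch for queue_work_on(); the names and the choice of system_wq are mine, not from the source, and per the docstring above the caller must ensure the target CPU cannot go away:

        #include <linux/workqueue.h>
        #include <linux/smp.h>

        static void my_pinned_fn(struct work_struct *work)
        {
                pr_info("running on CPU %d\n", smp_processor_id());
        }

        static DECLARE_WORK(my_pinned_work, my_pinned_fn);

        static void my_trigger(void)
        {
                /* false means the item was already pending somewhere */
                if (!queue_work_on(0, system_wq, &my_pinned_work))
                        pr_debug("already queued\n");
        }
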
2421 * available CPU if we need to schedule this work.
2444 * queue_work_node - queue work on a "random" cpu for a given NUMA node
2445 * @node: NUMA node that we are targeting the work for
2447 * @work: work to queue
2449 * We queue the work to a "random" CPU within a given NUMA node. The basic
2450 * idea here is to provide a way to somehow associate work with a given
2461 * Return: %false if @work was already on a queue, %true otherwise.
2464 struct work_struct *work) in queue_work_node() argument
2482 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_work_node()
2483 !clear_pending_if_disabled(work)) { in queue_work_node()
2486 __queue_work(cpu, wq, work); in queue_work_node()
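
A sketch of queue_work_node() usage, assuming my_unbound_wq was created with WQ_UNBOUND and my_node_work was initialized elsewhere (both hypothetical):

        #include <linux/workqueue.h>

        static struct workqueue_struct *my_unbound_wq;  /* must be WQ_UNBOUND for queue_work_node() */
        static struct work_struct my_node_work;

        static void my_queue_near(int nid)
        {
                /* Prefer a CPU on @nid; falls back to any CPU if the node has none usable. */
                queue_work_node(nid, my_unbound_wq, &my_node_work);
        }
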
2500 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
2508 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
2513 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
2516 * If @delay is 0, queue @dwork->work immediately. This is for in __queue_delayed_work()
2522 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
2546 * queue_delayed_work_on - queue work on specific CPU after delay
2547 * @cpu: CPU number to execute work on
2549 * @dwork: work to queue
2555 * this will prevent queueing of @dwork->work unless the offlined CPU
2558 * Return: %false if @work was already on a queue, %true otherwise. If
2565 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
2572 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_delayed_work_on()
2573 !clear_pending_if_disabled(work)) { in queue_delayed_work_on()
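
A small sketch of the delayed-work side (hypothetical names; system_wq chosen arbitrarily):

        #include <linux/workqueue.h>
        #include <linux/jiffies.h>

        static void my_delayed_fn(struct work_struct *work)
        {
                pr_info("delayed work ran\n");
        }

        static DECLARE_DELAYED_WORK(my_dwork, my_delayed_fn);

        static void my_arm_timeout(void)
        {
                /* Runs my_delayed_fn() roughly two seconds from now. */
                queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(2000));
        }
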
2584 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
2585 * @cpu: CPU number to execute work on
2587 * @dwork: work to queue
2592 * zero, @work is guaranteed to be scheduled immediately regardless of its
2607 ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags); in mod_delayed_work_on()
2609 if (!clear_pending_if_disabled(&dwork->work)) in mod_delayed_work_on()
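
mod_delayed_work_on() and its mod_delayed_work() wrapper are commonly used as a re-arm/debounce primitive; a sketch reusing the hypothetical my_dwork from the previous example:

        static void my_event_debounced(void)
        {
                /*
                 * Re-arms the same delayed_work; only the last call within each
                 * 100ms window results in my_delayed_fn() actually running.
                 */
                mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
        }
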
2623 __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work); in rcu_work_rcufn()
2628 * queue_rcu_work - queue work after an RCU grace period
2630 * @rwork: work to queue
2639 struct work_struct *work = &rwork->work; in queue_rcu_work() local
2643 * inside @rwork and disabled the inner work. in queue_rcu_work()
2645 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && in queue_rcu_work()
2646 !WARN_ON_ONCE(clear_pending_if_disabled(work))) { in queue_rcu_work()
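
A sketch of the queue_rcu_work() pattern: defer a free until after an RCU grace period and into process context (struct my_obj and all names are hypothetical):

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct my_obj {                         /* hypothetical object */
                struct rcu_work rwork;
                int payload;
        };

        static void my_free_fn(struct work_struct *work)
        {
                struct my_obj *obj = container_of(to_rcu_work(work), struct my_obj, rwork);

                kfree(obj);                     /* process context, after the grace period */
        }

        static void my_retire(struct my_obj *obj)
        {
                INIT_RCU_WORK(&obj->rwork, my_free_fn);
                queue_rcu_work(system_wq, &obj->rwork);
        }
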
2953 * @work: the pool's work for handling these idle workers
2960 * context, hence the split between timer callback and work item.
2962 static void idle_cull_fn(struct work_struct *work) in idle_cull_fn() argument
2964 struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work); in idle_cull_fn()
3024 struct work_struct *work; in pool_mayday_timeout() local
3036 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
3037 send_mayday(get_work_pwq(work)); in pool_mayday_timeout()
3161 * process_one_work - process single work
3163 * @work: work to process
3165 * Process @work. This function contains all the logic necessary to
3166 * process a single work including synchronization against and
3169 * call this function to process a work.
3174 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
3178 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
3189 * work->lockdep_map, make a copy and use that here. in process_one_work()
3193 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
3200 debug_work_deactivate(work); in process_one_work()
3201 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
3202 worker->current_work = work; in process_one_work()
3203 worker->current_func = work->func; in process_one_work()
3208 work_data = *work_data_bits(work); in process_one_work()
3217 list_del_init(&work->entry); in process_one_work()
3223 * execution of the pending work items. in process_one_work()
3231 * chain execution of the pending work items for WORKER_NOT_RUNNING in process_one_work()
3238 * update to @work. Also, do this inside @pool->lock so that in process_one_work()
3242 set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool)); in process_one_work()
3266 * read-recursive acquire on the work(queue) 'locks', but this will then in process_one_work()
3275 trace_workqueue_execute_start(work); in process_one_work()
3276 worker->current_func(work); in process_one_work()
3278 * While we must be careful to not use "work" after this, the trace in process_one_work()
3281 trace_workqueue_execute_end(work, worker->current_func); in process_one_work()
3302 * kernels, where a requeueing work item waiting for something to in process_one_work()
3303 * happen could deadlock with stop_machine as such work item could in process_one_work()
3317 * CPU intensive by wq_worker_tick() if @work hogged CPU longer than in process_one_work()
3341 * may change while processing a work, so this function repeatedly
3342 * fetches a work from the top and executes it.
3350 struct work_struct *work; in process_scheduled_works() local
3353 while ((work = list_first_entry_or_null(&worker->scheduled, in process_scheduled_works()
3359 process_one_work(worker, work); in process_scheduled_works()
3379 * work items regardless of their specific target workqueue. The only
3380 * exception is work items which belong to workqueues with a rescuer which
3420 * preparing to process a work or actually processing it. in worker_thread()
3435 struct work_struct *work = in worker_thread() local
3439 if (assign_work(work, worker, NULL)) in worker_thread()
3446 * pool->lock is held and there's no work to process and no need to in worker_thread()
3463 struct work_struct *work, *n; in assign_rescuer_work() local
3465 /* have work items to rescue? */ in assign_rescuer_work()
3479 * work, and gone idle again due to concurrency limits. in assign_rescuer_work()
3487 * Do more work if memory pressure is still on to reduce in assign_rescuer_work()
3498 work = list_first_entry(&pool->worklist, struct work_struct, entry); in assign_rescuer_work()
3500 work = list_next_entry(cursor, entry); in assign_rescuer_work()
3502 /* find the next work item to rescue */ in assign_rescuer_work()
3503 list_for_each_entry_safe_from(work, n, &pool->worklist, entry) { in assign_rescuer_work()
3504 if (get_work_pwq(work) == pwq && assign_work(work, rescuer, &n)) { in assign_rescuer_work()
3522 * Regular work processing on a pool may block trying to create a new
3554 * shouldn't have any work pending, but @wq->maydays may still have in rescuer_thread()
3556 * all the work items before the rescuer got to them. Go through in rescuer_thread()
3586 * If the per-turn work item limit is reached and other in rescuer_thread()
3657 struct work_struct *work = in bh_worker() local
3661 if (assign_work(work, worker, NULL)) in bh_worker()
3694 struct work_struct work; member
3699 static void drain_dead_softirq_workfn(struct work_struct *work) in drain_dead_softirq_workfn() argument
3702 container_of(work, struct wq_drain_dead_softirq_work, work); in drain_dead_softirq_workfn()
3707 * @pool's CPU is dead and we want to execute its still pending work in drain_dead_softirq_workfn()
3708 * items from this BH work item which is running on a different CPU. As in drain_dead_softirq_workfn()
3709 * its CPU is dead, @pool can't be kicked and, as work execution path in drain_dead_softirq_workfn()
3726 * still are pending work items, reschedule self and return so that we in drain_dead_softirq_workfn()
3731 queue_work(system_bh_highpri_wq, work); in drain_dead_softirq_workfn()
3733 queue_work(system_bh_wq, work); in drain_dead_softirq_workfn()
3740 * @cpu is dead. Drain the remaining BH work items on the current CPU. It's
3744 * on). Let's keep it simple and drain them synchronously. These are BH work
3758 INIT_WORK_ONSTACK(&dead_work.work, drain_dead_softirq_workfn); in workqueue_softirq_dead()
3763 queue_work(system_bh_highpri_wq, &dead_work.work); in workqueue_softirq_dead()
3765 queue_work(system_bh_wq, &dead_work.work); in workqueue_softirq_dead()
3768 destroy_work_on_stack(&dead_work.work); in workqueue_softirq_dead()
3775 * @target_work: work item being flushed (NULL for workqueue flushes)
3776 * @from_cancel: are we called from the work cancel path
3779 * If this is not the cancel path (which implies work being flushed is either
3809 struct work_struct work; member
3814 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
3816 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
3821 * insert_wq_barrier - insert a barrier work
3824 * @target: target work to attach @barr to
3833 * try_to_grab_pending() can't determine whether the work to be
3835 * flag of the previous work while there must be a valid next work
3836 * after a work with LINKED flag set.
3863 INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func, in insert_wq_barrier()
3865 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
3871 /* The barrier work item does not participate in nr_active. */ in insert_wq_barrier()
3894 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
3901 * @work_color: new work color, < 0 for no-op
3999 static void touch_work_lockdep_map(struct work_struct *work, in touch_work_lockdep_map() argument
4006 lock_map_acquire(&work->lockdep_map); in touch_work_lockdep_map()
4007 lock_map_release(&work->lockdep_map); in touch_work_lockdep_map()
4015 * __flush_workqueue - ensure that any scheduled work has run to completion.
4018 * This function sleeps until all work items which were queued on entry
4177 * work items on @wq can queue further work items on it. @wq is flushed
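
For completeness, a sketch of flushing a whole workqueue (reusing the hypothetical my_wq from the earlier max_active example):

        static void my_quiesce(void)
        {
                /* Returns once every item queued on my_wq before this call has finished. */
                flush_workqueue(my_wq);
        }
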
4226 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, in start_flush_work() argument
4235 pool = get_work_pool(work); in start_flush_work()
4243 pwq = get_work_pwq(work); in start_flush_work()
4248 worker = find_worker_executing_work(pool, work); in start_flush_work()
4255 check_flush_dependency(wq, work, from_cancel); in start_flush_work()
4257 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
4260 touch_work_lockdep_map(work, wq); in start_flush_work()
4266 * For single threaded workqueues the deadlock happens when the work in start_flush_work()
4267 * is after the work issuing the flush_work(). For rescuer equipped in start_flush_work()
4282 static bool __flush_work(struct work_struct *work, bool from_cancel) in __flush_work() argument
4289 if (WARN_ON(!work->func)) in __flush_work()
4292 if (!start_flush_work(work, &barr, from_cancel)) in __flush_work()
4297 * that @work must have been executing during start_flush_work() and in __flush_work()
4298 * can't currently be queued. Its data must contain OFFQ bits. If @work in __flush_work()
4303 unsigned long data = *work_data_bits(work); in __flush_work()
4317 pool = get_work_pool(work); in __flush_work()
4331 destroy_work_on_stack(&barr.work); in __flush_work()
4336 * flush_work - wait for a work to finish executing the last queueing instance
4337 * @work: the work to flush
4339 * Wait until @work has finished execution. @work is guaranteed to be idle
4343 * %true if flush_work() waited for the work to finish execution,
4346 bool flush_work(struct work_struct *work) in flush_work() argument
4349 return __flush_work(work, false); in flush_work()
4355 * @dwork: the delayed work to flush
4357 * Delayed timer is cancelled and the pending work is queued for
4362 * %true if flush_work() waited for the work to finish execution,
4369 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
4371 return flush_work(&dwork->work); in flush_delayed_work()
4377 * @rwork: the rcu work to flush
4380 * %true if flush_rcu_work() waited for the work to finish execution,
4385 if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) { in flush_rcu_work()
4387 flush_work(&rwork->work); in flush_rcu_work()
4390 return flush_work(&rwork->work); in flush_rcu_work()
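
A sketch combining the flush variants above (reusing the hypothetical items from earlier sketches):

        static void my_sync_point(void)
        {
                flush_work(&my_pinned_work);    /* wait for the last queued instance to finish */
                flush_delayed_work(&my_dwork);  /* cancel the timer, queue immediately, then wait */
        }
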
4402 WARN_ONCE(true, "workqueue: work disable count overflowed\n"); in work_offqd_disable()
4410 WARN_ONCE(true, "workqueue: work disable count underflowed\n"); in work_offqd_enable()
4413 static bool __cancel_work(struct work_struct *work, u32 cflags) in __cancel_work() argument
4419 ret = work_grab_pending(work, cflags, &irq_flags); in __cancel_work()
4421 work_offqd_unpack(&offqd, *work_data_bits(work)); in __cancel_work()
4426 set_work_pool_and_clear_pending(work, offqd.pool_id, in __cancel_work()
4432 static bool __cancel_work_sync(struct work_struct *work, u32 cflags) in __cancel_work_sync() argument
4436 ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE); in __cancel_work_sync()
4438 if (*work_data_bits(work) & WORK_OFFQ_BH) in __cancel_work_sync()
4444 * Skip __flush_work() during early boot when we know that @work isn't in __cancel_work_sync()
4448 __flush_work(work, true); in __cancel_work_sync()
4451 enable_work(work); in __cancel_work_sync()
4459 bool cancel_work(struct work_struct *work) in cancel_work() argument
4461 return __cancel_work(work, 0); in cancel_work()
4466 * cancel_work_sync - cancel a work and wait for it to finish
4467 * @work: the work to cancel
4469 * Cancel @work and wait for its execution to finish. This function can be used
4470 * even if the work re-queues itself or migrates to another workqueue. On return
4471 * from this function, @work is guaranteed to be not pending or executing on any
4474 * cancel_work_sync(&delayed_work->work) must not be used for delayed_work's.
4477 * Must be called from a sleepable context if @work was last queued on a non-BH
4479 * if @work was last queued on a BH workqueue.
4481 * Returns %true if @work was pending, %false otherwise.
4483 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
4485 return __cancel_work_sync(work, 0); in cancel_work_sync()
4490 * cancel_delayed_work - cancel a delayed work
4499 * The work callback function may still be running on return, unless
4500 * it returns %true and the work doesn't re-arm itself. Explicitly flush or
4507 return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED); in cancel_delayed_work()
4512 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
4513 * @dwork: the delayed work to cancel
4522 return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED); in cancel_delayed_work_sync()
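
A typical teardown sketch using the cancel APIs above (hypothetical names from the earlier examples):

        static void my_remove(void)
        {
                cancel_delayed_work_sync(&my_dwork);    /* delayed_work needs the *_delayed_* variant */
                cancel_work_sync(&my_pinned_work);      /* idle and not running anywhere on return */
        }
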
4527 * disable_work - Disable and cancel a work item
4528 * @work: work item to disable
4530 * Disable @work by incrementing its disable count and cancel it if currently
4531 * pending. As long as the disable count is non-zero, any attempt to queue @work
4535 * Can be called from any context. Returns %true if @work was pending, %false
4538 bool disable_work(struct work_struct *work) in disable_work() argument
4540 return __cancel_work(work, WORK_CANCEL_DISABLE); in disable_work()
4545 * disable_work_sync - Disable, cancel and drain a work item
4546 * @work: work item to disable
4548 * Similar to disable_work() but also wait for @work to finish if currently
4551 * Must be called from a sleepable context if @work was last queued on a non-BH
4553 * if @work was last queued on a BH workqueue.
4555 * Returns %true if @work was pending, %false otherwise.
4557 bool disable_work_sync(struct work_struct *work) in disable_work_sync() argument
4559 return __cancel_work_sync(work, WORK_CANCEL_DISABLE); in disable_work_sync()
4564 * enable_work - Enable a work item
4565 * @work: work item to enable
4567 * Undo disable_work[_sync]() by decrementing @work's disable count. @work can
4573 bool enable_work(struct work_struct *work) in enable_work() argument
4578 work_grab_pending(work, 0, &irq_flags); in enable_work()
4580 work_offqd_unpack(&offqd, *work_data_bits(work)); in enable_work()
4582 set_work_pool_and_clear_pending(work, offqd.pool_id, in enable_work()
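
A sketch of the disable/enable pair described above, e.g. around a reconfiguration window (hypothetical names; sleepable context assumed since my_pinned_work targets a non-BH workqueue):

        static void my_reconfigure(void)
        {
                disable_work_sync(&my_pinned_work);     /* cancel, wait, and make queueing a no-op */

                /* ... safely modify state that my_pinned_fn() also touches ... */

                enable_work(&my_pinned_work);           /* queueing works again; item is not re-queued */
        }
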
4591 * disable_delayed_work - Disable and cancel a delayed work item
4592 * @dwork: delayed work item to disable
4594 * disable_work() for delayed work items.
4598 return __cancel_work(&dwork->work, in disable_delayed_work()
4604 * disable_delayed_work_sync - Disable, cancel and drain a delayed work item
4605 * @dwork: delayed work item to disable
4607 * disable_work_sync() for delayed work items.
4611 return __cancel_work_sync(&dwork->work, in disable_delayed_work_sync()
4617 * enable_delayed_work - Enable a delayed work item
4618 * @dwork: delayed work item to enable
4620 * enable_work() for delayed work items.
4624 return enable_work(&dwork->work); in enable_delayed_work()
4651 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
4653 INIT_WORK(work, func); in schedule_on_each_cpu()
4654 schedule_work_on(cpu, work); in schedule_on_each_cpu()
4668 * @ew: guaranteed storage for the execute work structure (must
4669 * be available when the work executes)
4680 fn(&ew->work); in execute_in_process_context()
4684 INIT_WORK(&ew->work, fn); in execute_in_process_context()
4685 schedule_work(&ew->work); in execute_in_process_context()
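
A sketch of schedule_on_each_cpu() (hypothetical function; it may sleep, so process context only):

        #include <linux/workqueue.h>
        #include <linux/smp.h>

        static void my_percpu_fn(struct work_struct *work)
        {
                pr_info("hello from CPU %d\n", smp_processor_id());
        }

        static int my_run_everywhere(void)
        {
                /* Sleeps until the function has run on every online CPU; 0 on success. */
                return schedule_on_each_cpu(my_percpu_fn);
        }
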
5042 * which implies no work queued to the pool, which implies no worker can in put_unbound_pool()
5151 static void pwq_release_workfn(struct kthread_work *work) in pwq_release_workfn() argument
5153 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_release_workfn()
5223 * Set the dummy cursor work with valid function and get_work_pwq(). in init_pwq()
5225 * The cursor work should only be in the pwq->pool->worklist, and in init_pwq()
5226 * should not be treated as a processable work item. in init_pwq()
5413 * of newly queued work items until execution of older work items in in apply_wqattrs_prepare()
5477 * work items are affine to the pod it was issued on. Older pwqs are released as
5478 * in-flight work items finish. Note that a work item which repeatedly requeues
5511 * executing the work items for the workqueue will lose their CPU affinity and
5514 * responsibility to flush the work item from CPU_DOWN_PREPARE.
5697 * activate inactive work items accordingly. If @wq is freezing, clear
5719 * Update @wq->max/min_active and then kick inactive work items if more in wq_adjust_max_active()
5720 * active work items are allowed. This doesn't break work item ordering in wq_adjust_max_active()
5721 * because new work items are always queued behind existing inactive in wq_adjust_max_active()
5722 * work items if there are any. in wq_adjust_max_active()
5734 * Round-robin through pwq's activating the first inactive work item in wq_adjust_max_active()
5937 * Safely destroy a workqueue. All work currently pending will be done first.
5939 * This function does NOT guarantee that non-pending work that has been
6066 * interdependent work items. Instead, an unbound workqueue is guaranteed to be
6067 * able to process min_active number of interdependent work items which is
6087 * current_work - retrieve %current task's work struct
6092 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
6106 * work functions to determine whether it's being run off the rescuer task.
6156 * work_busy - test whether a work is currently pending or running
6157 * @work: the work to be tested
6159 * Test whether @work is currently pending or running. There is no
6166 unsigned int work_busy(struct work_struct *work) in work_busy() argument
6172 if (work_pending(work)) in work_busy()
6176 pool = get_work_pool(work); in work_busy()
6179 if (find_worker_executing_work(pool, work)) in work_busy()
6190 * set_worker_desc - set description for the current work item
6194 * This function can be called by a running work function to describe what
6195 * the work item is about. If the worker task gets dumped, this
6217 * If @task is a worker and currently executing a work item, print out the
6219 * set_worker_desc() by the currently executing work item.
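
A sketch combining set_worker_desc() and work_busy(); the description string and names are made up, and my_long_running_fn is assumed to be queued as a work item elsewhere:

        static void my_long_running_fn(struct work_struct *work)
        {
                set_worker_desc("mydev: flushing ring %d", 0);  /* shows up in worker dumps */

                if (work_busy(&my_pinned_work) & WORK_BUSY_RUNNING)
                        pr_debug("pinned item still executing somewhere\n");
        }
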
6313 static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp) in pr_cont_work() argument
6315 if (work->func == wq_barrier_func) { in pr_cont_work()
6318 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
6326 pr_cont_work_flush(comma, work->func, pcwsp); in pr_cont_work()
6334 struct work_struct *work; in show_pwq() local
6365 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
6366 pr_cont_work(false, work, &pcws); in show_pwq()
6373 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6374 if (get_work_pwq(work) == pwq) { in show_pwq()
6383 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
6384 if (get_work_pwq(work) != pwq) in show_pwq()
6387 pr_cont_work(comma, work, &pcws); in show_pwq()
6388 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
6398 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
6399 pr_cont_work(comma, work, &pcws); in show_pwq()
6400 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
6433 * drivers that queue work while holding locks in show_one_workqueue()
6466 /* How long the first pending work is waiting for a worker. */ in show_one_worker_pool()
6472 * queue work while holding locks also taken in their write in show_one_worker_pool()
6590 * are a lot of assumptions on strong associations among work, pwq and
6636 * unbound chain execution of currently pending work items. in unbind_workers()
6826 struct work_struct work; member
6832 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
6834 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
6856 INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key); in work_on_cpu_key()
6857 schedule_work_on(cpu, &wfc.work); in work_on_cpu_key()
6858 flush_work(&wfc.work); in work_on_cpu_key()
6859 destroy_work_on_stack(&wfc.work); in work_on_cpu_key()
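
work_on_cpu_key() backs the work_on_cpu() helper; a sketch assuming an SMP kernel (names hypothetical):

        #include <linux/workqueue.h>
        #include <linux/smp.h>

        static long my_probe_fn(void *arg)
        {
                /* Runs synchronously on the chosen CPU; the return value is passed back. */
                return raw_smp_processor_id();
        }

        static void my_probe(void)
        {
                long cpu = work_on_cpu(1, my_probe_fn, NULL);   /* sleeps until my_probe_fn() returns */

                pr_info("probe ran on CPU %ld\n", cpu);
        }
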
7124 * max_active RW int : maximum number of in-flight work items
7556 * flush dependency, a concurrency managed work item which stays RUNNING
7585 * Show workers that might prevent the processing of pending work items.
7601 * drivers that queue work while holding locks in show_cpu_pool_busy_workers()
7860 * boot code to create workqueues and queue/cancel work items. Actual work item
8016 * been created and work items queued on them, but there are no kworkers
8017 * executing the work items yet. Populate the worker pools with the initial