Lines Matching full:work
307 * functions which do some additional work in non-modular code such as
795 * when they finish. A safe point for freezing is defined when one work
804 struct kthread_work *work; in kthread_worker_fn() local
827 work = NULL; in kthread_worker_fn()
830 work = list_first_entry(&worker->work_list, in kthread_worker_fn()
832 list_del_init(&work->node); in kthread_worker_fn()
834 worker->current_work = work; in kthread_worker_fn()
837 if (work) { in kthread_worker_fn()
838 kthread_work_func_t func = work->func; in kthread_worker_fn()
840 trace_sched_kthread_work_execute_start(work); in kthread_worker_fn()
841 work->func(work); in kthread_worker_fn()
843 * Avoid dereferencing work after this point. The trace in kthread_worker_fn()
846 trace_sched_kthread_work_execute_end(work, func); in kthread_worker_fn()
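The loop above pops one kthread_work at a time and calls work->func(work), so a handler must match kthread_work_func_t and recover its own context with container_of(). A minimal, hedged sketch of that pattern, assuming only the kthread worker API declared in <linux/kthread.h>; struct my_data, my_work_fn, my_item and the "my_worker" name are invented for illustration, not taken from kthread.c:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct my_data {
        struct kthread_work work;       /* embedded so the handler can find its owner */
        int payload;
};

/* Matches kthread_work_func_t; invoked as work->func(work) by kthread_worker_fn(). */
static void my_work_fn(struct kthread_work *work)
{
        struct my_data *d = container_of(work, struct my_data, work);

        pr_info("handling payload %d\n", d->payload);
}

static struct my_data my_item = { .payload = 42 };

static struct kthread_worker *start_example(void)
{
        struct kthread_worker *worker;

        worker = kthread_create_worker(0, "my_worker");
        if (IS_ERR(worker))
                return worker;          /* ERR_PTR on failure */

        kthread_init_work(&my_item.work, my_work_fn);
        kthread_queue_work(worker, &my_item.work);
        return worker;
}

The later sketches below reuse these hypothetical names.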
932 * how to handle pending work items, prevent queuing new ones, and
965 * Returns true when the work could not be queued at the moment.
970 struct kthread_work *work) in queuing_blocked() argument
974 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
978 struct kthread_work *work) in kthread_insert_work_sanity_check() argument
981 WARN_ON_ONCE(!list_empty(&work->node)); in kthread_insert_work_sanity_check()
982 /* Do not use a work with >1 worker, see kthread_queue_work() */ in kthread_insert_work_sanity_check()
983 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
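The WARN_ON_ONCE above enforces the rule spelled out in the kthread_queue_work() comment: a kthread_work may only be associated with one worker at a time and must be reinitialized before being handed to another one. A hedged sketch of doing that hand-off safely, continuing the hypothetical names from the sketch above:

static void move_between_workers(struct kthread_worker *worker_a,
                                 struct kthread_worker *worker_b,
                                 struct kthread_work *w)
{
        kthread_init_work(w, my_work_fn);
        kthread_queue_work(worker_a, w);

        /* Wait until worker_a is completely done with the item... */
        kthread_flush_work(w);

        /*
         * ...then reinitialize it before queueing on a different worker;
         * otherwise kthread_insert_work_sanity_check() would warn.
         */
        kthread_init_work(w, my_work_fn);
        kthread_queue_work(worker_b, w);
}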
986 /* insert @work before @pos in @worker */
988 struct kthread_work *work, in kthread_insert_work() argument
991 kthread_insert_work_sanity_check(worker, work); in kthread_insert_work()
993 trace_sched_kthread_work_queue_work(worker, work); in kthread_insert_work()
995 list_add_tail(&work->node, pos); in kthread_insert_work()
996 work->worker = worker; in kthread_insert_work()
1004 * @work: kthread_work to queue
1006 * Queue @work to worker @worker for async execution. @worker
1008 * if @work was successfully queued, %false if it was already pending.
1010 * Reinitialize the work if it needs to be used by another worker.
1014 struct kthread_work *work) in kthread_queue_work() argument
1020 if (!queuing_blocked(worker, work)) { in kthread_queue_work()
1021 kthread_insert_work(worker, work, &worker->work_list); in kthread_queue_work()
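kthread_queue_work() returns false when queuing_blocked() sees the work already on a list or being canceled, so callers that care about lost triggers check the return value. A short hedged sketch, reusing the hypothetical names above:

static void poke(struct kthread_worker *worker, struct kthread_work *w)
{
        if (!kthread_queue_work(worker, w))
                pr_debug("already pending; the queued/running instance covers this event\n");
}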
1031 * delayed work when the timer expires.
1040 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
1041 struct kthread_worker *worker = work->worker; in kthread_delayed_work_timer_fn()
1045 * This might happen when a pending work is reinitialized. in kthread_delayed_work_timer_fn()
1052 /* Work must not be used with >1 worker, see kthread_queue_work(). */ in kthread_delayed_work_timer_fn()
1053 WARN_ON_ONCE(work->worker != worker); in kthread_delayed_work_timer_fn()
1055 /* Move the work from worker->delayed_work_list. */ in kthread_delayed_work_timer_fn()
1056 WARN_ON_ONCE(list_empty(&work->node)); in kthread_delayed_work_timer_fn()
1057 list_del_init(&work->node); in kthread_delayed_work_timer_fn()
1058 if (!work->canceling) in kthread_delayed_work_timer_fn()
1059 kthread_insert_work(worker, work, &worker->work_list); in kthread_delayed_work_timer_fn()
1070 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
1075 * If @delay is 0, queue @dwork->work immediately. This is for in __kthread_queue_delayed_work()
1081 kthread_insert_work(worker, work, &worker->work_list); in __kthread_queue_delayed_work()
1086 kthread_insert_work_sanity_check(worker, work); in __kthread_queue_delayed_work()
1088 list_add(&work->node, &worker->delayed_work_list); in __kthread_queue_delayed_work()
1089 work->worker = worker; in __kthread_queue_delayed_work()
1095 * kthread_queue_delayed_work - queue the associated kthread work
1101 * If the work has not been pending it starts a timer that will queue
1102 * the work after the given @delay. If @delay is zero, it queues the
1103 * work immediately.
1105 * Return: %false if @work was already pending. It means that
1106 * either the timer was running or the work was queued. It returns %true
1113 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
1119 if (!queuing_blocked(worker, work)) { in kthread_queue_delayed_work()
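For delayed work the same bool convention applies, but "pending" can mean either that the timer is still running or that the work is already on the queue. A hedged usage sketch; my_dwork is hypothetical, and msecs_to_jiffies() comes from <linux/jiffies.h>:

#include <linux/jiffies.h>

static struct kthread_delayed_work my_dwork;

static void arm_later(struct kthread_worker *worker)
{
        kthread_init_delayed_work(&my_dwork, my_work_fn);

        /*
         * Runs my_work_fn() on the worker roughly 100 ms from now;
         * with a delay of 0 it would be queued immediately.
         */
        if (!kthread_queue_delayed_work(worker, &my_dwork, msecs_to_jiffies(100)))
                pr_debug("timer already running or work already queued\n");
}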
1130 struct kthread_work work; member
1134 static void kthread_flush_work_fn(struct kthread_work *work) in kthread_flush_work_fn() argument
1137 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
1143 * @work: work to flush
1145 * If @work is queued or executing, wait for it to finish execution.
1147 void kthread_flush_work(struct kthread_work *work) in kthread_flush_work() argument
1150 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_work()
1156 worker = work->worker; in kthread_flush_work()
1161 /* Work must not be used with >1 worker, see kthread_queue_work(). */ in kthread_flush_work()
1162 WARN_ON_ONCE(work->worker != worker); in kthread_flush_work()
1164 if (!list_empty(&work->node)) in kthread_flush_work()
1165 kthread_insert_work(worker, &fwork.work, work->node.next); in kthread_flush_work()
1166 else if (worker->current_work == work) in kthread_flush_work()
1167 kthread_insert_work(worker, &fwork.work, in kthread_flush_work()
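kthread_flush_work() works by inserting a dedicated flush work right behind the target (or behind the currently running item) and sleeping on its completion. A hedged sketch of waiting for one specific item, continuing the hypothetical names above:

static void update_and_wait(struct kthread_worker *worker)
{
        my_item.payload = 7;
        kthread_queue_work(worker, &my_item.work);

        /*
         * Returns only after any queued or currently executing
         * my_work_fn() invocation for this item has finished.
         */
        kthread_flush_work(&my_item.work);
}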
1181 * not manipulate the work list_head any longer.
1186 static void kthread_cancel_delayed_work_timer(struct kthread_work *work, in kthread_cancel_delayed_work_timer() argument
1190 container_of(work, struct kthread_delayed_work, work); in kthread_cancel_delayed_work_timer()
1191 struct kthread_worker *worker = work->worker; in kthread_cancel_delayed_work_timer()
1199 work->canceling++; in kthread_cancel_delayed_work_timer()
1203 work->canceling--; in kthread_cancel_delayed_work_timer()
1207 * This function removes the work from the worker queue.
1210 * the timer used by delayed work is not running, e.g. by calling
1213 * The work might still be in use when this function finishes. See the
1216 * Return: %true if @work was pending and successfully canceled,
1217 * %false if @work was not pending
1219 static bool __kthread_cancel_work(struct kthread_work *work) in __kthread_cancel_work() argument
1222 * Try to remove the work from a worker list. It might either in __kthread_cancel_work()
1225 if (!list_empty(&work->node)) { in __kthread_cancel_work()
1226 list_del_init(&work->node); in __kthread_cancel_work()
1234 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1236 * @dwork: kthread delayed work to queue
1241 * @work is guaranteed to be queued immediately.
1245 * A special case is when the work is being canceled in parallel.
1260 struct kthread_work *work = &dwork->work; in kthread_mod_delayed_work() local
1267 if (!work->worker) { in kthread_mod_delayed_work()
1272 /* Work must not be used with >1 worker, see kthread_queue_work() */ in kthread_mod_delayed_work()
1273 WARN_ON_ONCE(work->worker != worker); in kthread_mod_delayed_work()
1276 * Temporarily cancel the work but do not fight with another command in kthread_mod_delayed_work()
1277 * that is canceling the work as well. in kthread_mod_delayed_work()
1283 * when doing so. But the work can be removed from the queue (list) in kthread_mod_delayed_work()
1287 kthread_cancel_delayed_work_timer(work, &flags); in kthread_mod_delayed_work()
1288 if (work->canceling) { in kthread_mod_delayed_work()
1293 ret = __kthread_cancel_work(work); in kthread_mod_delayed_work()
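kthread_mod_delayed_work() is the re-arm primitive: it cancels a pending timer or queue entry (unless a cancel is racing) and queues the delayed work again with the new delay, which makes it a natural fit for debounce-style logic. A hedged sketch with an invented event-handler name, reusing my_dwork from above:

static void on_event(struct kthread_worker *worker)
{
        /*
         * Each event pushes execution out by another 50 ms; the handler
         * only runs once things have been quiet for that long.
         */
        kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(50));
}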
1303 static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork) in __kthread_cancel_work_sync() argument
1305 struct kthread_worker *worker = work->worker; in __kthread_cancel_work_sync()
1313 /* Work must not be used with >1 worker, see kthread_queue_work(). */ in __kthread_cancel_work_sync()
1314 WARN_ON_ONCE(work->worker != worker); in __kthread_cancel_work_sync()
1317 kthread_cancel_delayed_work_timer(work, &flags); in __kthread_cancel_work_sync()
1319 ret = __kthread_cancel_work(work); in __kthread_cancel_work_sync()
1321 if (worker->current_work != work) in __kthread_cancel_work_sync()
1325 * The work is in progress and we need to wait with the lock released. in __kthread_cancel_work_sync()
1328 work->canceling++; in __kthread_cancel_work_sync()
1330 kthread_flush_work(work); in __kthread_cancel_work_sync()
1332 work->canceling--; in __kthread_cancel_work_sync()
1341 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1342 * @work: the kthread work to cancel
1344 * Cancel @work and wait for its execution to finish. This function
1345 * can be used even if the work re-queues itself. On return from this
1346 * function, @work is guaranteed not to be pending or executing on any CPU.
1348 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1351 * The caller must ensure that the worker on which @work was last
1354 * Return: %true if @work was pending, %false otherwise.
1356 bool kthread_cancel_work_sync(struct kthread_work *work) in kthread_cancel_work_sync() argument
1358 return __kthread_cancel_work_sync(work, false); in kthread_cancel_work_sync()
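A typical caller uses kthread_cancel_work_sync() during teardown, when it must be sure the handler can no longer touch data that is about to be freed. A hedged sketch with the hypothetical names above:

static void stop_example(struct kthread_worker *worker)
{
        /*
         * After this returns, my_work_fn() is neither pending nor running
         * for this item, even if it had been re-queueing itself.
         */
        if (kthread_cancel_work_sync(&my_item.work))
                pr_debug("a pending instance was canceled before it ran\n");
}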
1363 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1365 * @dwork: the kthread delayed work to cancel
1373 return __kthread_cancel_work_sync(&dwork->work, true); in kthread_cancel_delayed_work_sync()
1387 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), in kthread_flush_worker()
1391 kthread_queue_work(worker, &fwork.work); in kthread_flush_worker()
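kthread_flush_worker() queues a flush work at the tail of the list and waits for it, so everything queued before the call has completed by the time it returns. A hedged one-call sketch:

static void drain(struct kthread_worker *worker)
{
        /* All work queued before this call has finished when it returns. */
        kthread_flush_worker(worker);
}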
1404 * Note that this function is not responsible for handling delayed work, so
1405 * the caller is responsible for queuing or canceling all delayed work items
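Since kthread_destroy_worker() does not deal with delayed work, a hedged teardown sketch cancels delayed items first and only then destroys the worker, continuing the hypothetical names above:

static void teardown(struct kthread_worker *worker)
{
        /* Make sure the delayed work can no longer fire on this worker. */
        kthread_cancel_delayed_work_sync(&my_dwork);
        kthread_cancel_work_sync(&my_item.work);

        /* Flushes remaining queued work, stops the kthread and frees the worker. */
        kthread_destroy_worker(worker);
}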