Lines Matching full:work

 * workqueue.h --- work queue handling for Linux.
 * The first word is the work queue pointer and the flags rolled into
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_INACTIVE_BIT = 1,	/* work item is inactive */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
 * When a work item is off queue, its high bits point to the last
	struct work_struct work;
	/* target workqueue and CPU ->timer uses to queue ->work */
	struct work_struct work;
	/* target workqueue ->rcu uses to queue ->work */
 * Work items in this workqueue are affine to these CPUs and not allowed
 * CPU pods are used to improve execution locality of unbound work
 * @ordered: work items must be executed one by one in queueing order
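These attribute fragments describe unbound workqueues, whose work items are not pinned to the CPU that queued them. A minimal sketch of creating one, assuming a hypothetical driver (the workqueue name and init function are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical driver state: an unbound workqueue lets the scheduler
 * place work items on any allowed CPU (grouped into pods for locality)
 * rather than on the submitting CPU. */
static struct workqueue_struct *my_unbound_wq;

static int my_driver_init(void)
{
	/* max_active of 0 requests the default limit. */
	my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
	if (!my_unbound_wq)
		return -ENOMEM;
	return 0;
}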
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	return container_of(work, struct delayed_work, work);
static inline struct rcu_work *to_rcu_work(struct work_struct *work)
	return container_of(work, struct rcu_work, work);
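Since a work function only receives a struct work_struct pointer, these container_of() wrappers recover the enclosing delayed_work (or rcu_work). A sketch, with a hypothetical device structure:

#include <linux/workqueue.h>

/* Hypothetical device embedding a delayed work item. */
struct my_device {
	struct delayed_work poll_work;
	int channel;
};

static void my_poll_fn(struct work_struct *work)
{
	/* to_delayed_work() recovers the delayed_work; a second
	 * container_of() recovers the device that embeds it. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_device *dev = container_of(dwork, struct my_device,
					     poll_work);

	(void)dev->channel;	/* ... poll the hardware ... */
}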
	struct work_struct work;
	.work = __WORK_INITIALIZER((n).work, (f)), \
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
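The on-stack helpers above exist for object debugging (CONFIG_DEBUG_OBJECTS_WORK); with it disabled they compile away, as the empty inline stubs show. A sketch of the intended lifecycle, with hypothetical function names:

#include <linux/workqueue.h>

static void my_stack_fn(struct work_struct *work)
{
	/* ... one-shot job ... */
}

static void my_run_once(void)
{
	struct work_struct w;

	INIT_WORK_ONSTACK(&w, my_stack_fn);	/* _ONSTACK variant is mandatory here */
	schedule_work(&w);
	flush_work(&w);			/* must finish before the frame unwinds */
	destroy_work_on_stack(&w);	/* pairs with INIT_WORK_ONSTACK() */
}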
 * initialize all of a work item in one go
 * assignment of the work data initializer allows the compiler
	INIT_WORK(&(_work)->work, (_func)); \
	INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
	INIT_WORK(&(_work)->work, (_func))
	INIT_WORK_ONSTACK(&(_work)->work, (_func))
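For items embedded in dynamically allocated objects, the INIT_WORK() family is the runtime equivalent of the static initializers. A sketch; struct my_ctx and its handlers are hypothetical and are reused by later examples:

#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct io_work;
	struct delayed_work retry_work;
};

static void my_io_fn(struct work_struct *work) { /* ... */ }
static void my_retry_fn(struct work_struct *work) { /* ... */ }

static void my_ctx_setup(struct my_ctx *ctx)
{
	/* Initializes the whole item in one go: data word, entry list,
	 * function pointer (plus the timer, for the delayed variant). */
	INIT_WORK(&ctx->io_work, my_io_fn);
	INIT_DELAYED_WORK(&ctx->retry_work, my_retry_fn);
}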
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
 * delayed_work_pending - Find out whether a delayable work item is currently pending
 * @w: The work item in question
	work_pending(&(w)->work)
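A sketch of the two predicates, reusing the hypothetical struct my_ctx from above; both are racy snapshots of WORK_STRUCT_PENDING_BIT:

static void my_poke(struct my_ctx *ctx)
{
	if (!work_pending(&ctx->io_work))
		schedule_work(&ctx->io_work);

	if (!delayed_work_pending(&ctx->retry_work))
		schedule_delayed_work(&ctx->retry_work, HZ);
}

The checks are mainly useful for diagnostics: the queueing functions already test and set the pending bit atomically, so the pattern above is safe but redundant.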
 * however, for example, a per-cpu work item scheduled from an
 * execute the work item on that CPU breaking the idleness, which in
 * system_highpri_wq is similar to system_wq but for work items which
 * @max_active: max in-flight work items per CPU, 0 for default
 * most one work item at any given time in the queued order. They are
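The "at most one work item ... in the queued order" fragment describes ordered workqueues. A minimal sketch of creating one (name and flags are illustrative):

static struct workqueue_struct *my_ordered_wq;

static int my_ordered_init(void)
{
	/* Ordered: max one in-flight item, executed in queueing order. */
	my_ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_MEM_RECLAIM);
	if (!my_ordered_wq)
		return -ENOMEM;
	return 0;
}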
			struct work_struct *work);
			struct work_struct *work);
			struct delayed_work *work, unsigned long delay);
extern bool flush_work(struct work_struct *work);
extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
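A hypothetical teardown path tying these together: flush_work() only waits for completion, cancel_work_sync() also kills a pending item and waits for a running one, and work_busy() is a non-synchronizing diagnostic snapshot:

static void my_ctx_teardown(struct my_ctx *ctx)
{
	/* Wait out running instances and close requeueing races. */
	cancel_work_sync(&ctx->io_work);
	cancel_delayed_work_sync(&ctx->retry_work);
}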
 * queue_work - queue work on a workqueue
 * @work: work to queue
 * Returns %false if @work was already on a queue, %true otherwise.
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * the CPU which will execute @work by the time such work executes, e.g.,
 *	WRITE_ONCE(x, 1);		[ @work is being executed ]
 *	r0 = queue_work(wq, work);	r1 = READ_ONCE(x);
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
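A sketch of the ordering guarantee in the litmus test above: the outcome r0 == false && r1 == 0 is impossible, i.e. when queue_work() returns %false the already pending or running instance is guaranteed to observe the preceding write. Names follow the comment; the workqueue argument is illustrative:

static int x;

static void my_obs_fn(struct work_struct *work)
{
	/* If a concurrent queue_work() returned false, this read sees 1. */
	int r1 = READ_ONCE(x);

	(void)r1;
}

static DECLARE_WORK(my_work, my_obs_fn);

static void my_submit(struct workqueue_struct *wq)
{
	WRITE_ONCE(x, 1);
	if (!queue_work(wq, &my_work)) {
		/* Already pending: that instance will observe x == 1. */
	}
}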
 * queue_delayed_work - queue work on a workqueue after delay
 * @dwork: delayable work to queue
 * mod_delayed_work - modify delay of or queue a delayed work
 * @dwork: work to queue
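A contrast of the two delayed-queueing calls, reusing the hypothetical struct my_ctx; the watchdog framing is illustrative:

static void my_touch_watchdog(struct my_ctx *ctx)
{
	/* queue_delayed_work() is a no-op if @dwork is already pending;
	 * mod_delayed_work() (re)arms the timer either way, so each call
	 * pushes the deadline out to HZ jiffies from now. */
	mod_delayed_work(system_wq, &ctx->retry_work, HZ);
}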
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
static inline bool schedule_work_on(int cpu, struct work_struct *work)
	return queue_work_on(cpu, system_wq, work);
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 * Returns %false if @work was already on the kernel-global workqueue and
static inline bool schedule_work(struct work_struct *work)
	return queue_work(system_wq, work);
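As the inline definitions show, schedule_work() and schedule_work_on() are thin wrappers that queue onto the kernel-global system_wq. A sketch reusing my_work from the earlier example; the CPU number is illustrative:

static void my_kick(void)
{
	schedule_work(&my_work);	/* any CPU, per the queue_work() rules */
}

static void my_kick_cpu0(void)
{
	schedule_work_on(0, &my_work);	/* pin execution to CPU 0 */
}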
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * schedule_delayed_work - put work task in global workqueue after delay
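The delayed counterparts queue onto system_wq after a delay in jiffies. A short sketch reusing the hypothetical struct my_ctx:

static void my_defer_retry(struct my_ctx *ctx)
{
	/* Run my_retry_fn() on system_wq roughly 100 ms from now. */
	schedule_delayed_work(&ctx->retry_work, msecs_to_jiffies(100));
}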
 * A new key is defined for each caller to make sure the work
 * A new key is defined for each caller to make sure the work