Lines Matching full:work
30 /* List head pointing to ordered work list */
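For orientation, the ordered work list documented by this comment lives next to its lock and the backing kernel workqueue. A trimmed-down sketch of the relevant fields follows; the name example_workqueue is invented, and the real struct in async-thread.c also carries threshold bookkeeping and flags.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct btrfs_fs_info;

/* Invented name: a minimal model of the workqueue struct used below. */
struct example_workqueue {
	struct btrfs_fs_info *fs_info;      /* owner, cf. btrfs_work_owner() */
	struct workqueue_struct *normal_wq; /* backing kernel workqueue */
	struct list_head ordered_list;      /* ordered work items, FIFO */
	spinlock_t list_lock;               /* protects ordered_list */
};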
61 struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) in btrfs_work_owner() argument
63 return work->wq->fs_info; in btrfs_work_owner()
173 * Hook for the threshold mechanism, called before executing the work;
174 * it runs in kthread context, so workqueue_set_max_active() is called here.
224 struct btrfs_work *work; in run_ordered_work() local
233 work = list_entry(list->next, struct btrfs_work, in run_ordered_work()
234 ordered_list); in run_ordered_work()
235 if (!test_bit(WORK_DONE_BIT, &work->flags)) in run_ordered_work()
239 * we are going to call the ordered done function, but in run_ordered_work()
240 * we leave the work item on the list as a barrier so in run_ordered_work()
241 * that later work items that are done don't have their in run_ordered_work()
242 * functions called before this one returns in run_ordered_work()
244 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) in run_ordered_work()
246 trace_btrfs_ordered_sched(work); in run_ordered_work()
247 spin_unlock_irqrestore(lock, flags); in run_ordered_work()
248 work->ordered_func(work); in run_ordered_work()
250 /* now take the lock again and drop our item from the list */ in run_ordered_work()
252 list_del(&work->ordered_list); in run_ordered_work()
255 if (work == self) { in run_ordered_work()
257 * This is the work item that the worker is currently in run_ordered_work()
258 * executing. in run_ordered_work()
260 * The kernel workqueue code guarantees non-reentrancy in run_ordered_work()
261 * of work items. I.e., if a work item with the same in run_ordered_work()
262 * address and work function is queued twice, the second in run_ordered_work()
263 * execution is blocked until the first one finishes. A in run_ordered_work()
264 * work item may be freed and recycled with the same in run_ordered_work()
265 * work function; the workqueue code assumes that the in run_ordered_work()
266 * original work item cannot depend on the recycled work in run_ordered_work()
267 * item in that case (see find_worker_executing_work()). in run_ordered_work()
269 * Note that different types of Btrfs work can depend on in run_ordered_work()
270 * each other, and one type of work on one Btrfs in run_ordered_work()
271 * filesystem may even depend on the same type of work in run_ordered_work()
272 * on another Btrfs filesystem via, e.g., a loop device. in run_ordered_work()
273 * Therefore, we must not allow the current work item to in run_ordered_work()
274 * be recycled until we are really done, otherwise we in run_ordered_work()
275 * break the above assumption and can deadlock. in run_ordered_work()
277 free_self = true; in run_ordered_work()
278 } else { in run_ordered_work()
283 work->ordered_free(work); in run_ordered_work()
284 /* NB: work must not be dereferenced past this point. */ in run_ordered_work()
285 trace_btrfs_all_work_done(wq->fs_info, work); in run_ordered_work()
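Taken together, these fragments implement ordered completion: WORK_DONE_BIT records that an item's normal function finished, WORK_ORDER_DONE_BIT claims its ordered function, and an unfinished item at the head of the list acts as a barrier for everything queued behind it. The following compilable userspace model shows just that logic; struct item and its fields are invented stand-ins, and the kernel's locking, tracing, and the deferred free for the work == self case are omitted.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for struct btrfs_work. */
struct item {
	struct item *next;
	bool done;       /* models WORK_DONE_BIT: normal function finished */
	bool order_done; /* models WORK_ORDER_DONE_BIT: ordered function claimed */
	int id;
};

/*
 * Models run_ordered_work(): run ordered functions strictly in queue
 * order; an unfinished item at the head blocks everything behind it.
 */
static void run_ordered(struct item **head)
{
	while (*head) {
		struct item *it = *head;

		if (!it->done)
			break;            /* barrier: later done items wait */
		if (it->order_done)
			break;            /* already claimed elsewhere */
		it->order_done = true;
		printf("ordered function for item %d\n", it->id);
		*head = it->next;         /* removed only after it ran */
		free(it);
	}
}

int main(void)
{
	struct item *a = calloc(1, sizeof(*a));
	struct item *b = calloc(1, sizeof(*b));
	struct item *c = calloc(1, sizeof(*c));
	struct item *head = a;

	a->id = 1; b->id = 2; c->id = 3;
	a->next = b;
	b->next = c;

	a->done = true;      /* item 1 finished its normal function */
	c->done = true;      /* item 3 finished early, out of order */
	run_ordered(&head);  /* prints item 1; item 2 is a barrier for 3 */

	head->done = true;   /* item 2 (now the head) finishes */
	run_ordered(&head);  /* prints items 2 and 3, in order */
	return 0;
}

The one subtlety the model drops is the work == self branch above: the kernel defers ordered_free for the item the worker is currently executing, so a recycled work item cannot deadlock against itself.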
299 struct btrfs_work *work = container_of(normal_work, struct btrfs_work, in btrfs_work_helper() local
300 normal_work); in btrfs_work_helper()
305 * We must not touch anything inside work in the following cases: in btrfs_work_helper()
306 * 1) after work->func(), if it has no ordered_free, in btrfs_work_helper()
307 * since the struct is freed inside work->func() itself; in btrfs_work_helper()
308 * 2) after setting WORK_DONE_BIT, since the work in btrfs_work_helper()
309 * may be freed by another thread almost instantly. in btrfs_work_helper()
310 * So we save the needed things here. in btrfs_work_helper()
312 if (work->ordered_func) in btrfs_work_helper()
313 need_order = 1; in btrfs_work_helper()
314 wq = work->wq; in btrfs_work_helper()
316 trace_btrfs_work_sched(work); in btrfs_work_helper()
317 thresh_exec_hook(wq); in btrfs_work_helper()
318 work->func(work); in btrfs_work_helper()
319 if (need_order) { in btrfs_work_helper()
320 set_bit(WORK_DONE_BIT, &work->flags); in btrfs_work_helper()
321 run_ordered_work(wq, work); in btrfs_work_helper()
322 } else { in btrfs_work_helper()
323 /* NB: work must not be dereferenced past this point. */ in btrfs_work_helper()
324 trace_btrfs_all_work_done(wq->fs_info, work); in btrfs_work_helper()
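The comment at lines 305-310 states the lifetime rule that shapes this helper: copy out everything you will need before calling work->func(), because the item may be freed the moment that function returns (no ordered_free) or the moment WORK_DONE_BIT is set. A compilable userspace sketch of the same save-before-call pattern, with invented names throughout:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct my_work;
typedef void (*my_func_t)(struct my_work *);

/* Invented stand-in for struct btrfs_work. */
struct my_work {
	my_func_t func;
	my_func_t ordered_func; /* NULL when ordering is not requested */
	const char *tag;
};

/*
 * Models btrfs_work_helper(): cache what is needed *before* func(),
 * since a work item with no ordered phase may free itself in func().
 */
static void helper(struct my_work *w)
{
	bool need_order = (w->ordered_func != NULL);
	const char *tag = w->tag;   /* cached: w may be gone after func() */

	w->func(w);
	if (need_order)
		w->ordered_func(w); /* ordered items outlive func() by contract */
	printf("done: %s\n", tag);  /* uses the cached copy, never w */
}

static void frees_itself(struct my_work *w)
{
	free(w); /* a func() with no ordered phase owns the final free */
}

int main(void)
{
	struct my_work *w = calloc(1, sizeof(*w));

	w->func = frees_itself;
	w->ordered_func = NULL;
	w->tag = "self-freeing work item";
	helper(w); /* safe only because helper() cached what it needs */
	return 0;
}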
328 void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, in btrfs_init_work() argument
329 btrfs_func_t ordered_func, btrfs_func_t ordered_free) in btrfs_init_work()
331 work->func = func; in btrfs_init_work()
332 work->ordered_func = ordered_func; in btrfs_init_work()
333 work->ordered_free = ordered_free; in btrfs_init_work()
334 INIT_WORK(&work->normal_work, btrfs_work_helper); in btrfs_init_work()
335 INIT_LIST_HEAD(&work->ordered_list); in btrfs_init_work()
336 work->flags = 0; in btrfs_init_work()
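Given this signature, a typical caller embeds struct btrfs_work in a larger context, recovers the context with container_of(), and frees it from the ordered_free callback once the ordered phase is done. A kernel-style sketch, not taken from the tree: struct my_ctx, the three callbacks, and submit_one() are invented, and GFP_NOFS is an assumption about the allocation context.

struct my_ctx {
	struct btrfs_work work; /* embedded, recovered via container_of() */
	int payload;            /* caller-specific data */
};

static void my_func(struct btrfs_work *work)
{
	/* the heavy lifting; may run concurrently with other items */
}

static void my_ordered_func(struct btrfs_work *work)
{
	/* runs strictly in submission order via run_ordered_work() */
}

static void my_ordered_free(struct btrfs_work *work)
{
	kfree(container_of(work, struct my_ctx, work));
}

static int submit_one(struct btrfs_workqueue *wq)
{
	struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOFS);

	if (!ctx)
		return -ENOMEM;
	btrfs_init_work(&ctx->work, my_func, my_ordered_func,
			my_ordered_free);
	btrfs_queue_work(wq, &ctx->work);
	return 0;
}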
339 static void __btrfs_queue_work(struct __btrfs_workqueue *wq, in __btrfs_queue_work()
340 struct btrfs_work *work) in __btrfs_queue_work() argument
344 work->wq = wq; in __btrfs_queue_work()
345 thresh_queue_hook(wq); in __btrfs_queue_work()
346 if (work->ordered_func) { in __btrfs_queue_work()
347 spin_lock_irqsave(&wq->list_lock, flags); in __btrfs_queue_work()
348 list_add_tail(&work->ordered_list, &wq->ordered_list); in __btrfs_queue_work()
349 spin_unlock_irqrestore(&wq->list_lock, flags); in __btrfs_queue_work()
350 } in __btrfs_queue_work()
351 trace_btrfs_work_queued(work); in __btrfs_queue_work()
352 queue_work(wq->normal_wq, &work->normal_work); in __btrfs_queue_work()
355 void btrfs_queue_work(struct btrfs_workqueue *wq, in btrfs_queue_work()
356 struct btrfs_work *work) in btrfs_queue_work() argument
360 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) in btrfs_queue_work()
361 dest_wq = wq->high; in btrfs_queue_work()
362 else in btrfs_queue_work()
363 dest_wq = wq->normal; in btrfs_queue_work()
364 __btrfs_queue_work(dest_wq, work); in btrfs_queue_work()
394 void btrfs_set_work_high_priority(struct btrfs_work *work) in btrfs_set_work_high_priority() argument
396 set_bit(WORK_HIGH_PRIO_BIT, &work->flags); in btrfs_set_work_high_priority()
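Combining the last two functions: setting WORK_HIGH_PRIO_BIT before queueing routes an item to the high-priority sub-queue, and per the test at line 360 the bit is honored only when wq->high exists; otherwise the item silently lands on the normal sub-queue. A sketch continuing the hypothetical submit_one() example above; the flag must be set before btrfs_queue_work() for the routing to take effect.

static int submit_one_high_prio(struct btrfs_workqueue *wq)
{
	struct my_ctx *ctx = kmalloc(sizeof(*ctx), GFP_NOFS);

	if (!ctx)
		return -ENOMEM;
	btrfs_init_work(&ctx->work, my_func, my_ordered_func,
			my_ordered_free);
	/* Must precede queueing; routes to wq->high when present. */
	btrfs_set_work_high_priority(&ctx->work);
	btrfs_queue_work(wq, &ctx->work);
	return 0;
}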