kernel/irq_work.c: excerpts matching `work`
/* Claim the entry so that no one else will poke at it. */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}
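For context, the flag bits that irq_work_claim() manipulates are defined outside this file; in kernels of this vintage they live in include/linux/smp_types.h and look roughly like the excerpt below (a reference sketch for reading the code above, not part of irq_work.c):

        enum {
                IRQ_WORK_PENDING        = 0x01, /* queued, callback not yet started */
                IRQ_WORK_BUSY           = 0x02, /* callback may still be running */
                IRQ_WORK_LAZY           = 0x04, /* no IPI, wait for the next tick */

                IRQ_WORK_CLAIMED        = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),

                CSD_TYPE_IRQ_WORK       = 0x20, /* type tag consumed by kernel/smp.c */
        };

Since IRQ_WORK_CLAIMED sets both bits in one atomic OR, a caller that finds IRQ_WORK_PENDING already set can back off immediately: whichever CPU owns the pending entry is guaranteed to run the callback.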
/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        /* If the work is "lazy", handle it from next tick if any */
        if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
}
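Lazy entries are created by initializing the flags with IRQ_WORK_LAZY, so they ride the next timer tick instead of forcing an IPI. The klogd wakeup in kernel/printk/printk.c is the classic in-tree user; quoted from memory of kernels of this vintage, so treat the exact spelling as approximate:

        static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
                .func = wake_up_klogd_work_func,
                .flags = ATOMIC_INIT(IRQ_WORK_LAZY),
        };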
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
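A minimal usage sketch; `my_func` and `my_work` are hypothetical names, not part of irq_work.c:

        static void my_func(struct irq_work *work)
        {
                /* Runs later via irq_work_run(): hardirq context, IRQs disabled */
                pr_info("deferred work ran\n");
        }

        static DEFINE_IRQ_WORK(my_work, my_func);

        /* From NMI or any other context too restricted for a workqueue: */
        irq_work_queue(&my_work);       /* returns false if already pending */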
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backend aren't NMI safe */
                WARN_ON_ONCE(in_nmi());
                __smp_call_single_queue(cpu, &work->llnode);
        } else {
                __irq_work_queue_local(work);
        }
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}
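The remote path hands the claimed node straight to the generic SMP layer; this is why irq_work_claim() folds CSD_TYPE_IRQ_WORK into the flags, since that type tag is what lets the target CPU's call-function queue dispatch the node back to irq_work_single(). Note the in_nmi() warning: unlike the local path, queueing on another CPU is not NMI-safe. A hedged sketch, with `my_work` and `target_cpu` as hypothetical names:

        /* Must not be called from NMI when target_cpu != smp_processor_id() */
        bool queued = irq_work_queue_on(&my_work, target_cpu); /* false if already pending */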
In irq_work_needs_cpu(), the same invariant is asserted for the local CPU:

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work
         * can be re-used.
         * Make it immediately visible so that other CPUs trying
         * to claim that work don't rely on us to handle their data
         * while we are in the middle of the func.
         */
        flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

        lockdep_irq_work_enter(work);
        work->func(work);
        lockdep_irq_work_exit(work);
        /*
         * Clear the BUSY bit and return to the free state if
         * no-one else claimed it meanwhile.
         */
        flags &= ~IRQ_WORK_PENDING;
        (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}
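The final cmpxchg is allowed to fail on purpose: if the callback (or another CPU) re-claimed the work while func ran, flags no longer matches and the entry simply stays claimed for another round. That is what makes self-requeueing callbacks safe; a hypothetical sketch, where `should_retry` is an invented helper:

        static void my_retry_func(struct irq_work *work)
        {
                /* PENDING was cleared before we were called, so this claim can succeed */
                if (should_retry())
                        irq_work_queue(work);
        }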
static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode)
                irq_work_single(work);
}
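For context, both per-CPU lists are drained from the same file: irq_work_run() below is called by the architecture's irq_work IPI handler, while irq_work_tick() picks up lazy entries (and raised ones on architectures without a dedicated IPI) from the timer tick:

        /*
         * Run the irq_work entries on this cpu. Requires to be ran from hardirq
         * context with local IRQs disabled.
         */
        void irq_work_run(void)
        {
                irq_work_run_list(this_cpu_ptr(&raised_list));
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        }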
/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
                cpu_relax();
}
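Because of the cpu_relax() busy-wait and the lockdep assertion, irq_work_sync() must be called with IRQs enabled and never from the callback itself. A typical teardown sketch, where `my_dev` is a hypothetical structure owning an irq_work:

        /* Make sure the callback has finished before the memory goes away */
        irq_work_sync(&my_dev->work);
        kfree(my_dev);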