| /linux/io_uring/ |
| io-wq.c |
    146:  static void io_wq_dec_running(struct io_worker *worker);
    163:  static bool io_worker_get(struct io_worker *worker)    in io_worker_get()
    165:  return refcount_inc_not_zero(&worker->ref);    in io_worker_get()
    168:  static void io_worker_release(struct io_worker *worker)    in io_worker_release()
    170:  if (refcount_dec_and_test(&worker->ref))    in io_worker_release()
    171:  complete(&worker->ref_done);    in io_worker_release()
    185:  static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)    in io_wq_get_acct()
    187:  return worker->acct;    in io_wq_get_acct()
    198:  struct io_worker *worker = current->worker_private;    in io_wq_worker_stopped()
    203:  return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);    in io_wq_worker_stopped()
    [all …]
|
| /linux/kernel/ |
| kthread.c |
    943:  void __kthread_init_worker(struct kthread_worker *worker,    in __kthread_init_worker()
    947:  memset(worker, 0, sizeof(struct kthread_worker));    in __kthread_init_worker()
    948:  raw_spin_lock_init(&worker->lock);    in __kthread_init_worker()
    949:  lockdep_set_class_and_name(&worker->lock, key, name);    in __kthread_init_worker()
    950:  INIT_LIST_HEAD(&worker->work_list);    in __kthread_init_worker()
    951:  INIT_LIST_HEAD(&worker->delayed_work_list);    in __kthread_init_worker()
    972:  struct kthread_worker *worker = worker_ptr;    in kthread_worker_fn()
    979:  WARN_ON(worker->task && worker->task != current);    in kthread_worker_fn()
    980:  worker->task = current;    in kthread_worker_fn()
    982:  if (worker->flags & KTW_FREEZABLE)    in kthread_worker_fn()
    [all …]
|
| workqueue.c |
    217:   struct worker *manager;    /* L: purely informational */
    350:   struct worker *rescuer;    /* MD: rescue worker */
    587:   #define for_each_pool_worker(worker, pool) \
    588:   list_for_each_entry((worker), &(pool)->workers, node) \
    978:   static inline void worker_set_flags(struct worker *worker, unsigned int flags)    in worker_set_flags()
    980:   struct worker_pool *pool = worker->pool;    in worker_set_flags()
    986:   !(worker->flags & WORKER_NOT_RUNNING)) {    in worker_set_flags()
    990:   worker->flags |= flags;    in worker_set_flags()
    1000:  static inline void worker_clr_flags(struct worker *worker, unsigned int flags)    in worker_clr_flags()
    1002:  struct worker_pool *pool = worker->pool;    in worker_clr_flags()
    [all …]
|
| /linux/tools/perf/bench/ |
| futex-hash.c |
    44:   struct worker {
    76:   struct worker *w = (struct worker *) arg;    in workerfn()
    134:  struct worker *worker = NULL;    in bench_futex_hash()
    162:  worker = calloc(params.nthreads, sizeof(*worker));    in bench_futex_hash()
    163:  if (!worker)    in bench_futex_hash()
    188:  worker[i].tid = i;    in bench_futex_hash()
    189:  worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));    in bench_futex_hash()
    190:  if (!worker[i].futex)    in bench_futex_hash()
    201:  ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,    in bench_futex_hash()
    202:  (void *)(struct worker *) &worker[i]);    in bench_futex_hash()
    [all …]
|
| epoll-wait.c |
    118:  struct worker {
    186:  struct worker *w = (struct worker *) arg;    in workerfn()
    240:  static void nest_epollfd(struct worker *w)    in nest_epollfd()
    292:  static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)    in do_threads()
    318:  struct worker *w = &worker[i];    in do_threads()
    366:  (void *)(struct worker *) w);    in do_threads()
    382:  struct worker *worker = p;    in writerfn()
    394:  shuffle((void *)worker, nthreads, sizeof(*worker));    in writerfn()
    398:  struct worker *w = &worker[i];    in writerfn()
    421:  struct worker *w1 = (struct worker *) p1;    in cmpworker()
    [all …]
|
| futex-lock-pi.c |
    27:   struct worker {
    35:   static struct worker *worker;
    87:   struct worker *w = (struct worker *) arg;    in workerfn()
    124:  static void create_threads(struct worker *w, struct perf_cpu_map *cpu)    in create_threads()
    141:  worker[i].tid = i;    in create_threads()
    144:  worker[i].futex = calloc(1, sizeof(u_int32_t));    in create_threads()
    145:  if (!worker[i].futex)    in create_threads()
    148:  worker[i].futex = &global_futex;    in create_threads()
    158:  if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {    in create_threads()
    195:  worker = calloc(params.nthreads, sizeof(*worker));    in bench_futex_lock_pi()
    [all …]
|
| epoll-ctl.c |
    67:   struct worker {
    132:  static inline void do_epoll_op(struct worker *w, int op, int fd)    in do_epoll_op()
    160:  static inline void do_random_epoll_op(struct worker *w)    in do_random_epoll_op()
    174:  struct worker *w = (struct worker *) arg;    in workerfn()
    204:  static void init_fdmaps(struct worker *w, int pct)    in init_fdmaps()
    223:  static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)    in do_threads()
    241:  struct worker *w = &worker[i];    in do_threads()
    277:  (void *)(struct worker *) w);    in do_threads()
    317:  struct worker *worker = NULL;    in bench_epoll_ctl()
    352:  worker = calloc(nthreads, sizeof(*worker));    in bench_epoll_ctl()
    [all …]
|
| sched-messaging.c |
    150:  static void create_thread_worker(union messaging_worker *worker,    in create_thread_worker()
    164:  ret = pthread_create(&worker->thread, &attr, func, ctx);    in create_thread_worker()
    171:  static void create_process_worker(union messaging_worker *worker,    in create_process_worker()
    175:  worker->pid = fork();    in create_process_worker()
    177:  if (worker->pid == -1) {    in create_process_worker()
    179:  } else if (worker->pid == 0) {    in create_process_worker()
    185:  static void create_worker(union messaging_worker *worker,    in create_worker()
    189:  return create_process_worker(worker, ctx, func);    in create_worker()
    191:  return create_thread_worker(worker, ctx, func);    in create_worker()
    194:  static void reap_worker(union messaging_worker *worker)    in reap_worker()
    [all …]
|
| futex-wake.c |
    36:   static pthread_t *worker;
    172:  worker = calloc(params.nthreads, sizeof(*worker));    in bench_futex_wake()
    173:  if (!worker)    in bench_futex_wake()
    195:  block_threads(worker, cpu);    in bench_futex_wake()
    224:  ret = pthread_join(worker[i], NULL);    in bench_futex_wake()
    238:  free(worker);    in bench_futex_wake()
|
| futex-requeue.c |
    36:   static pthread_t *worker;
    197:  worker = calloc(params.nthreads, sizeof(*worker));    in bench_futex_requeue()
    198:  if (!worker)    in bench_futex_requeue()
    228:  block_threads(worker, cpu);    in bench_futex_requeue()
    300:  ret = pthread_join(worker[i], NULL);    in bench_futex_requeue()
    313:  free(worker);    in bench_futex_requeue()
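
The perf bench entries above (futex-hash, epoll-wait, futex-lock-pi, epoll-ctl, futex-wake, futex-requeue) all share the same userspace worker-array pattern: allocate one per-thread struct, pthread_create() each entry, then join and free. A minimal standalone sketch of that pattern follows; it is not the bench code itself, and the names are illustrative.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
	int tid;
	pthread_t thread;
};

static void *workerfn(void *arg)
{
	struct worker *w = arg;

	printf("worker %d running\n", w->tid);
	return NULL;
}

int main(void)
{
	unsigned int i, nthreads = 4;
	struct worker *worker;

	/* one struct per thread, as in the bench programs above */
	worker = calloc(nthreads, sizeof(*worker));
	if (!worker)
		return 1;

	for (i = 0; i < nthreads; i++) {
		worker[i].tid = i;
		if (pthread_create(&worker[i].thread, NULL, workerfn, &worker[i]))
			return 1;
	}

	for (i = 0; i < nthreads; i++)
		pthread_join(worker[i].thread, NULL);

	free(worker);
	return 0;
}
```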
|
| /linux/drivers/vhost/ |
| vhost.c |
    244:  static void vhost_worker_queue(struct vhost_worker *worker,    in vhost_worker_queue()
    252:  llist_add(&work->node, &worker->work_list);    in vhost_worker_queue()
    253:  worker->ops->wakeup(worker);    in vhost_worker_queue()
    259:  struct vhost_worker *worker;    in vhost_vq_work_queue()
    263:  worker = rcu_dereference(vq->worker);    in vhost_vq_work_queue()
    264:  if (worker) {    in vhost_vq_work_queue()
    266:  vhost_worker_queue(worker, work);    in vhost_vq_work_queue()
    280:  static void __vhost_worker_flush(struct vhost_worker *worker)    in __vhost_worker_flush()
    284:  if (!worker->attachment_cnt || worker->killed)    in __vhost_worker_flush()
    290:  vhost_worker_queue(worker, &flush.work);    in __vhost_worker_flush()
    [all …]
|
| /linux/drivers/net/wireguard/ |
| queueing.c |
    13:  struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);    in wg_packet_percpu_multicore_worker_alloc()
    15:  if (!worker)    in wg_packet_percpu_multicore_worker_alloc()
    19:  per_cpu_ptr(worker, cpu)->ptr = ptr;    in wg_packet_percpu_multicore_worker_alloc()
    20:  INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);    in wg_packet_percpu_multicore_worker_alloc()
    22:  return worker;    in wg_packet_percpu_multicore_worker_alloc()
    35:  queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);    in wg_packet_queue_init()
    36:  if (!queue->worker) {    in wg_packet_queue_init()
    45:  free_percpu(queue->worker);    in wg_packet_queue_free()
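
The wireguard entry above allocates one work_struct per possible CPU, each carrying a private pointer. A hedged sketch of that per-CPU worker pattern follows; the struct and function names here are illustrative, not the wireguard code.

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct my_percpu_worker {
	void *ptr;
	struct work_struct work;
};

static struct my_percpu_worker __percpu *
my_percpu_worker_alloc(work_func_t function, void *ptr)
{
	struct my_percpu_worker __percpu *worker;
	int cpu;

	/* one worker slot per possible CPU */
	worker = alloc_percpu(struct my_percpu_worker);
	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

/*
 * Callers later queue per_cpu_ptr(worker, cpu)->work on the CPU of their
 * choice and release everything with free_percpu(worker).
 */
```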
|
| /linux/drivers/gpu/drm/ |
| drm_vblank_work.c |
    62:   kthread_queue_work(vblank->worker, &work->base);    in drm_handle_vblank_works()
    152:  ret = kthread_queue_work(vblank->worker, &work->base);    in drm_vblank_work_schedule()
    255:  kthread_flush_worker(vblank->worker);    in drm_vblank_work_flush_all()
    278:  struct kthread_worker *worker;    in drm_vblank_worker_init()
    282:  worker = kthread_run_worker(0, "card%d-crtc%d",    in drm_vblank_worker_init()
    285:  if (IS_ERR(worker))    in drm_vblank_worker_init()
    286:  return PTR_ERR(worker);    in drm_vblank_worker_init()
    288:  vblank->worker = worker;    in drm_vblank_worker_init()
    290:  sched_set_fifo(worker->task);    in drm_vblank_worker_init()
|
| /linux/include/linux/ |
| kthread.h |
    136:  struct kthread_worker *worker;
    164:  extern void __kthread_init_worker(struct kthread_worker *worker,
    167:  #define kthread_init_worker(worker) \
    170:  __kthread_init_worker((worker), "("#worker")->lock", &__key); \
    243:  bool kthread_queue_work(struct kthread_worker *worker,
    246:  bool kthread_queue_delayed_work(struct kthread_worker *worker,
    250:  bool kthread_mod_delayed_work(struct kthread_worker *worker,
    255:  void kthread_flush_worker(struct kthread_worker *worker);
    260:  void kthread_destroy_worker(struct kthread_worker *worker);
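
These declarations are the kthread_worker interface that the drm_vblank_work, msm, and erofs entries elsewhere in this listing build on. A minimal, hedged module sketch of typical usage follows; the my_* names and module boilerplate are illustrative, and kthread_run_worker() is used the same way as in the callers shown above.

```c
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct kthread_worker *my_worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	pr_info("running on a dedicated kthread worker\n");
}

static int __init my_init(void)
{
	/* one dedicated worker thread services everything queued to it */
	my_worker = kthread_run_worker(0, "my-worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	kthread_init_work(&my_work, my_work_fn);
	kthread_queue_work(my_worker, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	kthread_flush_worker(my_worker);	/* wait for queued work to finish */
	kthread_destroy_worker(my_worker);	/* stop and free the worker thread */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```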
|
| devm-helpers.h |
    48:  work_func_t worker)    in devm_delayed_work_autocancel()
    50:  INIT_DELAYED_WORK(w, worker);    in devm_delayed_work_autocancel()
    73:  work_func_t worker)    in devm_work_autocancel()
    75:  INIT_WORK(w, worker);    in devm_work_autocancel()
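
The two helpers above initialize the work and register a devm action so it is cancelled automatically when the device is unbound. A hedged sketch of typical use in a probe path follows; the driver, struct, and function names are made up for illustration.

```c
#include <linux/devm-helpers.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work poll_work;
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv,
					    poll_work.work);

	/* periodic polling would go here, then re-arm */
	schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(500));
}

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* cancel_delayed_work_sync() runs automatically on driver unbind */
	ret = devm_delayed_work_autocancel(&pdev->dev, &priv->poll_work,
					   my_poll_fn);
	if (ret)
		return ret;

	schedule_delayed_work(&priv->poll_work, msecs_to_jiffies(500));
	return 0;
}
```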
|
| /linux/samples/seccomp/ |
| user-trap.c |
    208:  pid_t worker = 0, tracer = 0;    in main()
    215:  worker = fork();    in main()
    216:  if (worker < 0) {    in main()
    221:  if (worker == 0) {    in main()
    347:  if (waitpid(worker, &status, 0) != worker) {    in main()
    372:  if (worker > 0)    in main()
    373:  kill(worker, SIGKILL);    in main()
|
| /linux/drivers/gpu/drm/msm/ |
| msm_atomic.c |
    118:  timer->worker = kthread_run_worker(0, "atomic-worker-%d", crtc_idx);    in msm_atomic_init_pending_timer()
    119:  if (IS_ERR(timer->worker)) {    in msm_atomic_init_pending_timer()
    120:  int ret = PTR_ERR(timer->worker);    in msm_atomic_init_pending_timer()
    121:  timer->worker = NULL;    in msm_atomic_init_pending_timer()
    124:  sched_set_fifo(timer->worker->task);    in msm_atomic_init_pending_timer()
    126:  msm_hrtimer_work_init(&timer->work, timer->worker,    in msm_atomic_init_pending_timer()
    135:  if (timer->worker)    in msm_atomic_destroy_pending_timer()
    136:  kthread_destroy_worker(timer->worker);    in msm_atomic_destroy_pending_timer()
|
| msm_io_utils.c |
    120:  kthread_queue_work(work->worker, &work->work);    in msm_hrtimer_worktimer()
    133:  struct kthread_worker *worker,    in msm_hrtimer_work_init()
    139:  work->worker = worker;    in msm_hrtimer_work_init()
|
| msm_kms.c |
    247:  if (kms->event_thread[i].worker)    in msm_drm_kms_uninit()
    248:  kthread_destroy_worker(kms->event_thread[i].worker);    in msm_drm_kms_uninit()
    309:  ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);    in msm_drm_kms_init()
    310:  if (IS_ERR(ev_thread->worker)) {    in msm_drm_kms_init()
    311:  ret = PTR_ERR(ev_thread->worker);    in msm_drm_kms_init()
    313:  ev_thread->worker = NULL;    in msm_drm_kms_init()
    317:  sched_set_fifo(ev_thread->worker->task);    in msm_drm_kms_init()
|
| /linux/Documentation/core-api/ |
| workqueue.rst |
    20:  queue is called workqueue and the thread is called worker.
    22:  While there are work items on the workqueue the worker executes the
    24:  there is no work item left on the workqueue the worker becomes idle.
    25:  When a new work item gets queued, the worker begins executing again.
    32:  worker thread per CPU and a single threaded (ST) wq had one worker
    42:  worker pool. An MT wq could provide only one execution context per CPU
    60:  * Use per-CPU unified worker pools shared by all wq to provide
    64:  * Automatically regulate worker pool and level of concurrency so that
    84:  the worker threads become idle. These worker threads are managed in
    85:  worker-pools.
    [all …]
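
As a companion to the documentation fragments above, here is a minimal, hedged module sketch of the API they describe: a work item queued on a dedicated unbound workqueue and executed by a worker from the shared worker pools. The my_* names and module boilerplate are illustrative.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	pr_info("executed by a kworker from the shared worker pools\n");
}

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);	/* a worker becomes busy, then idles again */
	return 0;
}

static void __exit my_exit(void)
{
	flush_work(&my_work);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```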
|
| /linux/drivers/block/ |
| loop.c |
    801:  struct loop_worker *cur_worker, *worker = NULL;    in loop_queue_work()
    816:  worker = cur_worker;    in loop_queue_work()
    824:  if (worker)    in loop_queue_work()
    827:  worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);    in loop_queue_work()
    832:  if (!worker) {    in loop_queue_work()
    840:  worker->blkcg_css = cmd->blkcg_css;    in loop_queue_work()
    841:  css_get(worker->blkcg_css);    in loop_queue_work()
    842:  INIT_WORK(&worker->work, loop_workfn);    in loop_queue_work()
    843:  INIT_LIST_HEAD(&worker->cmd_list);    in loop_queue_work()
    844:  INIT_LIST_HEAD(&worker->idle_list);    in loop_queue_work()
    [all …]
|
| /linux/drivers/md/ |
| dm-delay.c |
    39:   struct task_struct *worker;
    71:   return !!dc->worker;    in delay_is_fast()
    174:  if (dc->worker)    in delay_dtr()
    175:  kthread_stop(dc->worker);    in delay_dtr()
    284:  dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");    in delay_ctr()
    285:  if (IS_ERR(dc->worker)) {    in delay_ctr()
    286:  ret = PTR_ERR(dc->worker);    in delay_ctr()
    287:  dc->worker = NULL;    in delay_ctr()
    335:  wake_up_process(dc->worker);    in delay_bio()
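
The dm-delay entry above uses a bare kthread rather than a workqueue: kthread_run() starts a flush thread, delay_bio() nudges it with wake_up_process(), and kthread_stop() ends it. A hedged sketch of that pattern follows; all my_* names are illustrative and the loop body is a placeholder.

```c
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *my_worker;

static int my_flush_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* one round of flushing work would go here */
		set_current_state(TASK_INTERRUPTIBLE);
		/* woken early by wake_up_process() or kthread_stop() */
		schedule_timeout(HZ / 10);
	}
	return 0;
}

static int my_start_worker(void *ctx)
{
	my_worker = kthread_run(my_flush_fn, ctx, "my-flush-worker");
	if (IS_ERR(my_worker)) {
		int ret = PTR_ERR(my_worker);

		my_worker = NULL;
		return ret;
	}
	return 0;
}

static void my_stop_worker(void)
{
	if (my_worker)
		kthread_stop(my_worker);	/* waits for my_flush_fn() to return */
}
```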
|
| /linux/fs/erofs/ |
| zdata.c |
    295:  struct kthread_worker *worker;    in erofs_destroy_percpu_workers()
    299:  worker = rcu_dereference_protected(    in erofs_destroy_percpu_workers()
    302:  if (worker)    in erofs_destroy_percpu_workers()
    303:  kthread_destroy_worker(worker);    in erofs_destroy_percpu_workers()
    310:  struct kthread_worker *worker =    in erofs_init_percpu_worker()
    313:  if (IS_ERR(worker))    in erofs_init_percpu_worker()
    314:  return worker;    in erofs_init_percpu_worker()
    316:  sched_set_fifo_low(worker->task);    in erofs_init_percpu_worker()
    317:  return worker;    in erofs_init_percpu_worker()
    322:  struct kthread_worker *worker;    in erofs_init_percpu_workers()
    [all …]
|
| /linux/drivers/i2c/ |
| i2c-slave-testunit.c |
    50:   struct delayed_work worker;
    127:  queue_delayed_work(system_long_wq, &tu->worker,    in i2c_slave_testunit_slave_cb()
    170:  struct testunit_data *tu = container_of(work, struct testunit_data, worker.work);    in i2c_slave_testunit_work()
    247:  INIT_DELAYED_WORK(&tu->worker, i2c_slave_testunit_work);    in i2c_slave_testunit_probe()
    268:  cancel_delayed_work_sync(&tu->worker);    in i2c_slave_testunit_remove()
|
| /linux/drivers/crypto/caam/ |
| caamrng.c |
    43:   struct work_struct worker;
    143:  worker);    in caam_rng_worker()
    161:  schedule_work(&ctx->worker);    in caam_read()
    170:  flush_work(&ctx->worker);    in caam_cleanup()
    239:  INIT_WORK(&ctx->worker, caam_rng_worker);    in caam_init()
|