// SPDX-License-Identifier: GPL-2.0
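/*
 * Excerpt from io_uring/sqpoll.c: creation, parking, and teardown of the
 * SQPOLL kernel thread ("iou-sqp-<pid>") that submits SQ entries on behalf
 * of one or more io_uring contexts.
 *
 * io_sq_thread_unpark(): drop our park request and release sqd->lock so the
 * SQPOLL thread can run again. SHOULD_PARK is cleared unconditionally and
 * re-set if other parkers are still pending; a conditional clear would race
 * with another task bumping park_pending and setting the bit.
 */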
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}
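/*
 * io_sq_thread_park(): ask the SQPOLL thread to stop touching shared state
 * and take sqd->lock. park_pending counts nested parkers, SHOULD_PARK tells
 * the thread to drop the lock, and the wakeup kicks it if it is idle.
 */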
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}
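/*
 * io_sq_thread_stop(): one-way shutdown. Set SHOULD_STOP, wake the thread,
 * then wait for it to signal sqd->exited. Must not be called twice or from
 * the SQPOLL thread itself, hence the WARNs.
 */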
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}
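/*
 * io_put_sq_data(): drop a reference; the last put stops the thread and
 * frees the structure. No parker may still be pending at that point.
 */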
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}
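/*
 * io_sqd_update_thread_idle(): the shared thread idles for the longest
 * sq_thread_idle of all rings currently attached to it.
 */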
static void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}
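/*
 * io_sq_thread_finish(): detach a ring from its sq_data on ring teardown.
 * The ctx_list manipulation happens under park, so the SQPOLL thread cannot
 * be iterating the list concurrently; the reference is dropped afterwards.
 */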
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}
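/*
 * io_attach_sq_data(): IORING_SETUP_ATTACH_WQ — share the SQPOLL thread of
 * the existing ring behind p->wq_fd. Only rings owned by the same thread
 * group may attach; anything else fails with -EPERM.
 */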
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}
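/*
 * io_get_sq_data(): find or create the sq_data for a new ring. An attach
 * attempt that fails with anything but -EPERM is fatal; -EPERM falls
 * through to creating a fresh sq_data instead.
 */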
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}
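/* io_sqd_events_pending(): any PARK/STOP bit set in sqd->state? */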
static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}
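/*
 * __io_sq_thread(): one submission pass over a single ring — reap iopoll
 * completions, submit pending SQEs under the ring's own credentials, and
 * wake any task blocked in io_sqpoll_wait_sq() waiting for SQ ring space.
 */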
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/* don't submit if the ring is dying or still R_DISABLED */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}
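/*
 * io_sqd_handle_event(): called with sqd->lock held when a PARK/STOP bit or
 * a signal is pending. Dropping the mutex is what actually "parks" — the
 * parker holds it, so reacquisition blocks until it unparks us. Returns
 * true if the thread should exit.
 */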
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
		sqd->sq_cpu = raw_smp_processor_id();
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}
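/*
 * io_sq_thread(): main loop of the "iou-sqp-<pid>" kernel thread. Spin over
 * every attached ring while there is work; once sq_thread_idle expires with
 * nothing to do, set IORING_SQ_NEED_WAKEUP in each ring's flags (telling
 * userspace it must call io_uring_enter() with IORING_ENTER_SQ_WAKEUP) and
 * sleep until woken.
 */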
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	/* reset to our pid after we've set task_comm, for fdinfo */
	sqd->task_pid = current->pid;

	if (sqd->sq_cpu != -1) {
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	} else {
		set_cpus_allowed_ptr(current, cpu_online_mask);
		sqd->sq_cpu = raw_smp_processor_id();
	}

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}
				/* ... barrier, then recheck the SQ tail ... */
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
				sqd->sq_cpu = raw_smp_processor_id();
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}
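/*
 * io_sqpoll_wait_sq(): block a submitter until the SQ ring has room again.
 * The SQPOLL thread wakes this via ctx->sqo_sq_wait in __io_sq_thread().
 */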
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		/* recheck after queueing ourselves, then sleep */
		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
}
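/*
 * io_sq_offload_create(): ring-setup path. Validates ATTACH_WQ fds even
 * when SQPOLL is not requested (kept for compatibility), then, for SQPOLL
 * rings, finds or creates the sq_data, adds the ctx under park, and spawns
 * the io_sq_thread() worker unless we attached to a live one.
 */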
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}
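/*
 * io_sqpoll_wq_cpu_affinity(): apply an affinity mask to the io-wq of the
 * SQPOLL thread (IORING_REGISTER_IOWQ_AFF), under park so the thread
 * pointer stays stable while we use it.
 */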
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}
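/*
 * For context, a minimal userspace sketch of what the code above services
 * (not part of this file; assumes liburing): a ring created with
 * IORING_SETUP_SQPOLL gets an iou-sqp thread, and submissions need no
 * syscall while that thread is awake.
 */
#if 0	/* userspace example, not kernel code */
#include <liburing.h>
#include <string.h>

int sqpoll_example(void)
{
	struct io_uring ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;
	p.sq_thread_idle = 2000;	/* ms of idle before iou-sqp sleeps */

	/* may require elevated privileges on kernels before 5.11 */
	if (io_uring_queue_init_params(8, &ring, &p))
		return -1;

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	/* liburing adds IORING_ENTER_SQ_WAKEUP only if the thread slept */
	io_uring_submit(&ring);

	io_uring_queue_exit(&ring);
	return 0;
}
#endif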