Lines Matching +full:wait +full:- +full:free +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0
22 #include "io-wq.h"
32 IO_WORKER_F_FREE = 2, /* worker on free list */
89 * The list of free workers. Protected by #workers_lock
131 struct wait_queue_entry wait; member
158 return refcount_inc_not_zero(&worker->ref); in io_worker_get()
163 if (refcount_dec_and_test(&worker->ref)) in io_worker_release()
164 complete(&worker->ref_done); in io_worker_release()
169 return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND]; in io_get_acct()
180 return worker->acct; in io_wq_get_acct()
185 if (atomic_dec_and_test(&wq->worker_refs)) in io_worker_ref_put()
186 complete(&wq->worker_done); in io_worker_ref_put()
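The io_worker_get()/io_worker_release() and io_worker_ref_put() lines above all follow one lifetime pattern: a reference count whose final drop completes a completion that a teardown path waits on (wait_for_completion(&worker->ref_done) and wait_for_completion(&wq->worker_done) appear further down). Below is a minimal userspace C sketch of that pattern, with C11 atomics standing in for refcount_t and a pthread mutex/condvar pair standing in for struct completion; every name here (completion, worker_ref, worker_get, ...) is an illustrative stand-in, not the kernel API.

/* Userspace sketch: a refcount whose final put signals a "completion". */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            done;
};

struct worker_ref {
        atomic_int        ref;       /* like worker->ref */
        struct completion ref_done;  /* like worker->ref_done */
};

static bool worker_get(struct worker_ref *w)
{
        int old = atomic_load(&w->ref);

        /* like refcount_inc_not_zero(): never resurrect a dying object */
        do {
                if (old == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&w->ref, &old, old + 1));
        return true;
}

static void worker_put(struct worker_ref *w)
{
        /* like refcount_dec_and_test() followed by complete() */
        if (atomic_fetch_sub(&w->ref, 1) == 1) {
                pthread_mutex_lock(&w->ref_done.lock);
                w->ref_done.done = true;
                pthread_cond_signal(&w->ref_done.cond);
                pthread_mutex_unlock(&w->ref_done.lock);
        }
}

static void wait_for_ref_done(struct worker_ref *w)
{
        /* like wait_for_completion(): block until the last reference is gone */
        pthread_mutex_lock(&w->ref_done.lock);
        while (!w->ref_done.done)
                pthread_cond_wait(&w->ref_done.cond, &w->ref_done.lock);
        pthread_mutex_unlock(&w->ref_done.lock);
}

The point of the split is that any in-flight user who took a reference with worker_get() can finish safely, while the exit path drops its own reference and then blocks in wait_for_ref_done() until everybody else has let go.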
191 struct io_worker *worker = current->worker_private; in io_wq_worker_stopped()
196 return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state); in io_wq_worker_stopped()
202 struct io_wq *wq = worker->wq; in io_worker_cancel_cb()
204 atomic_dec(&acct->nr_running); in io_worker_cancel_cb()
205 raw_spin_lock(&acct->workers_lock); in io_worker_cancel_cb()
206 acct->nr_workers--; in io_worker_cancel_cb()
207 raw_spin_unlock(&acct->workers_lock); in io_worker_cancel_cb()
209 clear_bit_unlock(0, &worker->create_state); in io_worker_cancel_cb()
217 if (cb->func != create_worker_cb) in io_task_worker_match()
225 struct io_wq *wq = worker->wq; in io_worker_exit()
229 struct callback_head *cb = task_work_cancel_match(wq->task, in io_worker_exit()
238 wait_for_completion(&worker->ref_done); in io_worker_exit()
240 raw_spin_lock(&acct->workers_lock); in io_worker_exit()
241 if (test_bit(IO_WORKER_F_FREE, &worker->flags)) in io_worker_exit()
242 hlist_nulls_del_rcu(&worker->nulls_node); in io_worker_exit()
243 list_del_rcu(&worker->all_list); in io_worker_exit()
244 raw_spin_unlock(&acct->workers_lock); in io_worker_exit()
247 * this worker is a goner, clear ->worker_private to avoid any in io_worker_exit()
251 current->worker_private = NULL; in io_worker_exit()
260 return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) && in __io_acct_run_queue()
261 !wq_list_empty(&acct->work_list); in __io_acct_run_queue()
265 * If there's work to do, returns true with acct->lock acquired. If not,
269 __acquires(&acct->lock) in io_acct_run_queue()
271 raw_spin_lock(&acct->lock); in io_acct_run_queue()
275 raw_spin_unlock(&acct->lock); in io_acct_run_queue()
280 * Check head of free list for an available worker. If one isn't available,
294 hlist_nulls_for_each_entry_rcu(worker, n, &acct->free_list, nulls_node) { in io_acct_activate_free_worker()
302 wake_up_process(worker->task); in io_acct_activate_free_worker()
311 * We need a worker. If we find a free one, we're good. If not, and we're
320 if (unlikely(!acct->max_workers)) in io_wq_create_worker()
321 pr_warn_once("io-wq is not configured for unbound workers"); in io_wq_create_worker()
323 raw_spin_lock(&acct->workers_lock); in io_wq_create_worker()
324 if (acct->nr_workers >= acct->max_workers) { in io_wq_create_worker()
325 raw_spin_unlock(&acct->workers_lock); in io_wq_create_worker()
328 acct->nr_workers++; in io_wq_create_worker()
329 raw_spin_unlock(&acct->workers_lock); in io_wq_create_worker()
330 atomic_inc(&acct->nr_running); in io_wq_create_worker()
331 atomic_inc(&wq->worker_refs); in io_wq_create_worker()
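The io_acct_activate_free_worker() and io_wq_create_worker() lines above implement "wake an idle worker if one exists, otherwise create a new one as long as nr_workers < max_workers", with the accounting done under workers_lock before the worker is actually spawned. A hedged userspace sketch of that decision follows; a mutex stands in for workers_lock and a condvar stands in for waking a parked worker off the free list (the kernel scan is RCU-based), and all names are illustrative.

/* Userspace sketch: wake an idle worker, else create one up to a cap. */
#include <pthread.h>
#include <stdbool.h>

struct acct {
        pthread_mutex_t workers_lock;   /* like acct->workers_lock */
        pthread_cond_t  idle;           /* parked ("free list") workers wait here */
        int             nr_idle;        /* how many workers are parked */
        int             nr_workers;     /* like acct->nr_workers */
        int             max_workers;    /* like acct->max_workers */
};

static void *worker_fn(void *arg)
{
        (void)arg;                      /* worker loop omitted in this sketch */
        return NULL;
}

static bool ensure_worker(struct acct *acct)
{
        pthread_t tid;

        pthread_mutex_lock(&acct->workers_lock);
        if (acct->nr_idle > 0) {
                /* like io_acct_activate_free_worker(): wake one parked worker;
                 * the woken worker removes itself from the free list */
                pthread_cond_signal(&acct->idle);
                pthread_mutex_unlock(&acct->workers_lock);
                return true;
        }
        if (acct->nr_workers >= acct->max_workers) {
                /* cap reached: queued work waits for a running worker */
                pthread_mutex_unlock(&acct->workers_lock);
                return true;
        }
        acct->nr_workers++;             /* account for the worker before creating it */
        pthread_mutex_unlock(&acct->workers_lock);

        if (pthread_create(&tid, NULL, worker_fn, acct) != 0) {
                /* roll the accounting back on failure, as create_io_worker() does */
                pthread_mutex_lock(&acct->workers_lock);
                acct->nr_workers--;
                pthread_mutex_unlock(&acct->workers_lock);
                return false;
        }
        pthread_detach(tid);
        return true;
}

Bumping nr_workers before dropping the lock is what keeps two concurrent callers from both deciding they may create the "last" allowed worker.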
339 atomic_inc(&acct->nr_running); in io_wq_inc_running()
351 wq = worker->wq; in create_worker_cb()
352 acct = worker->acct; in create_worker_cb()
353 raw_spin_lock(&acct->workers_lock); in create_worker_cb()
355 if (acct->nr_workers < acct->max_workers) { in create_worker_cb()
356 acct->nr_workers++; in create_worker_cb()
359 raw_spin_unlock(&acct->workers_lock); in create_worker_cb()
363 atomic_dec(&acct->nr_running); in create_worker_cb()
366 clear_bit_unlock(0, &worker->create_state); in create_worker_cb()
374 struct io_wq *wq = worker->wq; in io_queue_worker_create()
377 if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) in io_queue_worker_create()
387 if (test_bit(0, &worker->create_state) || in io_queue_worker_create()
388 test_and_set_bit_lock(0, &worker->create_state)) in io_queue_worker_create()
391 atomic_inc(&wq->worker_refs); in io_queue_worker_create()
392 init_task_work(&worker->create_work, func); in io_queue_worker_create()
393 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { in io_queue_worker_create()
400 if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) in io_queue_worker_create()
406 clear_bit_unlock(0, &worker->create_state); in io_queue_worker_create()
410 atomic_dec(&acct->nr_running); in io_queue_worker_create()
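io_queue_worker_create() above guards against queueing more than one create request per worker: it bails out if bit 0 of worker->create_state is already set, otherwise claims it with test_and_set_bit_lock(), and only then queues the task_work; every failure path unwinds the bit, the reference and nr_running. A minimal userspace sketch of that single-flight guard follows, with an atomic_flag in place of the bit lock and a trivial stand-in for task_work_add(); the names are illustrative, not the kernel API.

/* Userspace sketch: at most one pending "create worker" request at a time. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct worker_state {
        atomic_flag create_pending;     /* like bit 0 of worker->create_state */
        atomic_int  nr_running;         /* like acct->nr_running */
};

/* Stand-in for task_work_add(): pretend the callback was queued for later. */
static bool queue_deferred(void (*fn)(struct worker_state *), struct worker_state *w)
{
        (void)fn;
        (void)w;
        return true;
}

static void do_create(struct worker_state *w)
{
        printf("would create a new worker here\n");
        atomic_flag_clear(&w->create_pending);          /* like clear_bit_unlock() */
}

static bool queue_worker_create(struct worker_state *w)
{
        /* like test_and_set_bit_lock(): only one request may be in flight */
        if (atomic_flag_test_and_set(&w->create_pending))
                return false;

        if (queue_deferred(do_create, w))
                return true;

        /* failure: unwind in the reverse order of setup */
        atomic_flag_clear(&w->create_pending);
        atomic_fetch_sub(&w->nr_running, 1);
        return false;
}

int main(void)
{
        struct worker_state w = { .create_pending = ATOMIC_FLAG_INIT };

        atomic_store(&w.nr_running, 1);
        queue_worker_create(&w);        /* claims the flag and "queues" the request */
        if (!queue_worker_create(&w))
                printf("second request rejected while one is pending\n");
        do_create(&w);                  /* the deferred callback runs and clears it */
        return 0;
}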
418 struct io_wq *wq = worker->wq; in io_wq_dec_running()
420 if (!test_bit(IO_WORKER_F_UP, &worker->flags)) in io_wq_dec_running()
423 if (!atomic_dec_and_test(&acct->nr_running)) in io_wq_dec_running()
428 raw_spin_unlock(&acct->lock); in io_wq_dec_running()
429 atomic_inc(&acct->nr_running); in io_wq_dec_running()
430 atomic_inc(&wq->worker_refs); in io_wq_dec_running()
440 if (test_bit(IO_WORKER_F_FREE, &worker->flags)) { in __io_worker_busy()
441 clear_bit(IO_WORKER_F_FREE, &worker->flags); in __io_worker_busy()
442 raw_spin_lock(&acct->workers_lock); in __io_worker_busy()
443 hlist_nulls_del_init_rcu(&worker->nulls_node); in __io_worker_busy()
444 raw_spin_unlock(&acct->workers_lock); in __io_worker_busy()
452 __must_hold(acct->workers_lock) in __io_worker_idle()
454 if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) { in __io_worker_idle()
455 set_bit(IO_WORKER_F_FREE, &worker->flags); in __io_worker_idle()
456 hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list); in __io_worker_idle()
467 return __io_get_work_hash(atomic_read(&work->flags)); in io_get_work_hash()
474 spin_lock_irq(&wq->hash->wait.lock); in io_wait_on_hash()
475 if (list_empty(&wq->wait.entry)) { in io_wait_on_hash()
476 __add_wait_queue(&wq->hash->wait, &wq->wait); in io_wait_on_hash()
477 if (!test_bit(hash, &wq->hash->map)) { in io_wait_on_hash()
479 list_del_init(&wq->wait.entry); in io_wait_on_hash()
483 spin_unlock_irq(&wq->hash->wait.lock); in io_wait_on_hash()
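io_wait_on_hash() above shows the classic ordering for waiting on an external condition: take the wait-queue lock, add yourself, then re-test the condition (the hash bit) and remove yourself again if it already cleared, so a wakeup that raced with registration is not lost. A userspace sketch of the same discipline with a mutex and condition variable follows; the struct names and busy_map layout are illustrative, not the kernel wait-queue API.

/* Userspace sketch: register as a waiter, then re-check the condition. */
#include <pthread.h>
#include <stdbool.h>

struct hash_wait {
        pthread_mutex_t lock;           /* like wq->hash->wait.lock */
        pthread_cond_t  cond;
        unsigned long   busy_map;       /* like wq->hash->map: one bit per bucket */
};

/* Returns once the given hash bucket is free again. */
static void wait_on_hash(struct hash_wait *hw, unsigned int hash)
{
        pthread_mutex_lock(&hw->lock);
        /*
         * The kernel code adds itself to the wait queue and then tests the
         * bit, removing itself if the bit is already clear.  With a condvar
         * the same "no missed wakeup" property comes from re-testing the
         * condition under the lock before every sleep.
         */
        while (hw->busy_map & (1UL << hash))
                pthread_cond_wait(&hw->cond, &hw->lock);
        pthread_mutex_unlock(&hw->lock);
}

/* The side that finishes hashed work: clear the bit and wake waiters. */
static void release_hash(struct hash_wait *hw, unsigned int hash)
{
        pthread_mutex_lock(&hw->lock);
        hw->busy_map &= ~(1UL << hash);
        pthread_cond_broadcast(&hw->cond);
        pthread_mutex_unlock(&hw->lock);
}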
489 __must_hold(acct->lock) in io_get_next_work()
493 unsigned int stall_hash = -1U; in io_get_next_work()
495 wq_list_for_each(node, prev, &acct->work_list) { in io_get_next_work()
502 work_flags = atomic_read(&work->flags); in io_get_next_work()
504 wq_list_del(&acct->work_list, node, prev); in io_get_next_work()
510 tail = wq->hash_tail[hash]; in io_get_next_work()
513 if (!test_and_set_bit(hash, &wq->hash->map)) { in io_get_next_work()
514 wq->hash_tail[hash] = NULL; in io_get_next_work()
515 wq_list_cut(&acct->work_list, &tail->list, prev); in io_get_next_work()
518 if (stall_hash == -1U) in io_get_next_work()
520 /* fast forward to a next hash, for-each will fix up @prev */ in io_get_next_work()
521 node = &tail->list; in io_get_next_work()
524 if (stall_hash != -1U) { in io_get_next_work()
531 set_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_get_next_work()
532 raw_spin_unlock(&acct->lock); in io_get_next_work()
534 raw_spin_lock(&acct->lock); in io_get_next_work()
536 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_get_next_work()
537 if (wq_has_sleeper(&wq->hash->wait)) in io_get_next_work()
538 wake_up(&wq->hash->wait); in io_get_next_work()
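The io_get_next_work() lines above pick the next work item while serializing "hashed" work: only one item per hash bucket may run at a time, a chain sharing a busy hash is skipped, and if nothing else is runnable the acct is marked stalled until the bucket is released. Below is a hedged sketch of that selection policy over a plain array-backed queue; the per-bucket bitmap, NO_HASH marker and stalled flag are illustrative simplifications of hash->map and IO_ACCT_STALLED_BIT.

/* Userspace sketch: pick runnable work, serializing items that share a hash. */
#include <stdbool.h>
#include <stddef.h>

#define NO_HASH (-1)

struct work {
        int  hash;                      /* NO_HASH for unhashed work */
        bool taken;
};

struct queue {
        struct work   *items;
        size_t         nr;
        unsigned long  busy_map;        /* like hash->map: set while a bucket runs */
        bool           stalled;         /* like IO_ACCT_STALLED_BIT */
};

/* Returns the next runnable item, or NULL (and marks the queue stalled). */
static struct work *get_next_work(struct queue *q)
{
        bool pending = false;

        for (size_t i = 0; i < q->nr; i++) {
                struct work *w = &q->items[i];

                if (w->taken)
                        continue;
                pending = true;
                if (w->hash != NO_HASH && (q->busy_map & (1UL << w->hash)))
                        continue;       /* that bucket is already running */
                if (w->hash != NO_HASH)
                        q->busy_map |= 1UL << w->hash;  /* claim the bucket */
                w->taken = true;
                return w;
        }
        /* only blocked hashed work is left: remember that we are stalled */
        q->stalled = pending;
        return NULL;
}

/* Completion side: release the bucket and clear the stall. */
static void put_work(struct queue *q, struct work *w)
{
        if (w->hash != NO_HASH)
                q->busy_map &= ~(1UL << w->hash);
        q->stalled = false;
}

The stalled flag is what lets the enqueue and completion paths know a worker is parked waiting for a hash bucket rather than simply out of work.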
553 raw_spin_lock(&worker->lock); in io_assign_current_work()
554 worker->cur_work = work; in io_assign_current_work()
555 raw_spin_unlock(&worker->lock); in io_assign_current_work()
559 * Called with acct->lock held, drops it before returning
563 __releases(&acct->lock) in io_worker_handle_work()
565 struct io_wq *wq = worker->wq; in io_worker_handle_work()
566 bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state); in io_worker_handle_work()
572 * If we got some work, mark us as busy. If we didn't, but in io_worker_handle_work()
574 * Mark us stalled so we don't keep looking for work when we in io_worker_handle_work()
587 raw_spin_lock(&worker->lock); in io_worker_handle_work()
588 worker->cur_work = work; in io_worker_handle_work()
589 raw_spin_unlock(&worker->lock); in io_worker_handle_work()
592 raw_spin_unlock(&acct->lock); in io_worker_handle_work()
605 unsigned int work_flags = atomic_read(&work->flags); in io_worker_handle_work()
608 : -1U; in io_worker_handle_work()
614 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in io_worker_handle_work()
615 wq->do_work(work); in io_worker_handle_work()
618 linked = wq->free_work(work); in io_worker_handle_work()
628 if (hash != -1U && !next_hashed) { in io_worker_handle_work()
630 spin_lock_irq(&wq->hash->wait.lock); in io_worker_handle_work()
631 clear_bit(hash, &wq->hash->map); in io_worker_handle_work()
632 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_worker_handle_work()
633 spin_unlock_irq(&wq->hash->wait.lock); in io_worker_handle_work()
634 if (wq_has_sleeper(&wq->hash->wait)) in io_worker_handle_work()
635 wake_up(&wq->hash->wait); in io_worker_handle_work()
641 raw_spin_lock(&acct->lock); in io_worker_handle_work()
649 struct io_wq *wq = worker->wq; in io_wq_worker()
653 set_mask_bits(&worker->flags, 0, in io_wq_worker()
656 snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid); in io_wq_worker()
659 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) { in io_wq_worker()
666 * the acct->lock held. If not, it will drop it. in io_wq_worker()
671 raw_spin_lock(&acct->workers_lock); in io_wq_worker()
676 if (last_timeout && (exit_mask || acct->nr_workers > 1)) { in io_wq_worker()
677 acct->nr_workers--; in io_wq_worker()
678 raw_spin_unlock(&acct->workers_lock); in io_wq_worker()
684 raw_spin_unlock(&acct->workers_lock); in io_wq_worker()
698 wq->cpu_mask); in io_wq_worker()
702 if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct)) in io_wq_worker()
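The io_wq_worker() lines above are the worker main loop: run queued work while there is any, otherwise sleep with a timeout, and on timeout exit (dropping nr_workers under workers_lock) unless this is the last worker for the acct. A userspace sketch of that "grow on demand, shrink when idle" loop follows, with a timed condvar wait standing in for the timed schedule the kernel worker does; the five-second timeout and all names are illustrative.

/* Userspace sketch: a pool worker that exits after sitting idle too long. */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct pool {
        pthread_mutex_t lock;
        pthread_cond_t  work_avail;
        int             nr_queued;
        int             nr_workers;
        bool            exiting;        /* like IO_WQ_BIT_EXIT */
};

static void run_one(struct pool *p)     /* placeholder for io_worker_handle_work() */
{
        p->nr_queued--;
}

static void *pool_worker(void *arg)
{
        struct pool *p = arg;

        pthread_mutex_lock(&p->lock);
        while (!p->exiting) {
                if (p->nr_queued > 0) {
                        run_one(p);
                        continue;
                }
                struct timespec deadline;
                clock_gettime(CLOCK_REALTIME, &deadline);
                deadline.tv_sec += 5;                   /* idle timeout */
                int rc = pthread_cond_timedwait(&p->work_avail, &p->lock, &deadline);
                /* on timeout, leave the pool unless we are the last worker */
                if (rc != 0 && p->nr_queued == 0 && p->nr_workers > 1)
                        break;
        }
        p->nr_workers--;
        pthread_mutex_unlock(&p->lock);
        return NULL;
}

Keeping at least one worker alive on timeout mirrors the "exit_mask || acct->nr_workers > 1" test above: an acct that still exists never shrinks to zero workers just because it went idle.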
710 * Called when a worker is scheduled in. Mark us as currently running.
714 struct io_worker *worker = tsk->worker_private; in io_wq_worker_running()
718 if (!test_bit(IO_WORKER_F_UP, &worker->flags)) in io_wq_worker_running()
720 if (test_bit(IO_WORKER_F_RUNNING, &worker->flags)) in io_wq_worker_running()
722 set_bit(IO_WORKER_F_RUNNING, &worker->flags); in io_wq_worker_running()
728 * running and we have work pending, wake up a free one or create a new one.
732 struct io_worker *worker = tsk->worker_private; in io_wq_worker_sleeping()
736 if (!test_bit(IO_WORKER_F_UP, &worker->flags)) in io_wq_worker_sleeping()
738 if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags)) in io_wq_worker_sleeping()
741 clear_bit(IO_WORKER_F_RUNNING, &worker->flags); in io_wq_worker_sleeping()
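io_wq_worker_running() and io_wq_worker_sleeping() above are the scheduler hooks, paired by the per-worker RUNNING flag: they keep an atomic nr_running per acct, and when the last running worker is about to block while work is still queued, io_wq_dec_running() (further up) bumps the counts back and queues creation of a replacement. The sketch below is a loose userspace rendering of that "last runner is blocking, ask for a substitute" check; the pairing flag, counters and request_new_worker() helper are illustrative reconstructions, not the kernel code.

/* Userspace sketch: when the last running worker blocks, ask for a new one. */
#include <stdatomic.h>
#include <stdbool.h>

struct acct_running {
        atomic_int nr_running;          /* workers currently executing work */
        atomic_int nr_queued;           /* work items waiting to run */
};

struct wrk {
        bool running;                   /* like IO_WORKER_F_RUNNING, pairs the hooks */
        struct acct_running *acct;
};

static void request_new_worker(struct acct_running *a)
{
        (void)a;                        /* a real pool would queue worker creation */
}

/* Called from the path that is about to block (I/O wait, page fault, ...). */
static void worker_sleeping(struct wrk *w)
{
        if (!w->running)
                return;
        w->running = false;
        /* like io_wq_dec_running(): only act if we were the last runner */
        if (atomic_fetch_sub(&w->acct->nr_running, 1) != 1)
                return;
        if (atomic_load(&w->acct->nr_queued) == 0)
                return;
        /* work is pending and nobody is left running it: undo and ask for help */
        atomic_fetch_add(&w->acct->nr_running, 1);
        request_new_worker(w->acct);
}

/* Called when the worker is scheduled back in. */
static void worker_running(struct wrk *w)
{
        if (w->running)
                return;
        w->running = true;
        atomic_fetch_add(&w->acct->nr_running, 1);
}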
748 tsk->worker_private = worker; in io_init_new_worker()
749 worker->task = tsk; in io_init_new_worker()
750 set_cpus_allowed_ptr(tsk, wq->cpu_mask); in io_init_new_worker()
752 raw_spin_lock(&acct->workers_lock); in io_init_new_worker()
753 hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list); in io_init_new_worker()
754 list_add_tail_rcu(&worker->all_list, &acct->all_list); in io_init_new_worker()
755 set_bit(IO_WORKER_F_FREE, &worker->flags); in io_init_new_worker()
756 raw_spin_unlock(&acct->workers_lock); in io_init_new_worker()
773 if (worker->init_retries++ >= WORKER_INIT_LIMIT) in io_should_retry_thread()
777 case -EAGAIN: in io_should_retry_thread()
778 case -ERESTARTSYS: in io_should_retry_thread()
779 case -ERESTARTNOINTR: in io_should_retry_thread()
780 case -ERESTARTNOHAND: in io_should_retry_thread()
795 schedule_delayed_work(&worker->work, in queue_create_worker_retry()
796 msecs_to_jiffies(worker->init_retries * 5)); in queue_create_worker_retry()
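io_should_retry_thread() and queue_create_worker_retry() above handle transient failures when forking a worker thread: only -EAGAIN and the signal-restart errors are retried, the attempt count is capped by WORKER_INIT_LIMIT, and each retry is delayed a little longer (init_retries * 5 ms). A hedged userspace sketch of the same policy around pthread_create() follows; the retry limit constant and the set of "transient" errors here are illustrative, not the kernel values.

/* Userspace sketch: retry transient thread-creation failures with backoff. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define INIT_RETRY_LIMIT 8              /* like WORKER_INIT_LIMIT, value illustrative */

static bool transient_error(int err)
{
        /* only errors that can clear up on their own are worth retrying */
        return err == EAGAIN || err == EINTR;
}

static int create_worker_retrying(pthread_t *tid, void *(*fn)(void *), void *arg)
{
        for (int attempt = 1; ; attempt++) {
                int err = pthread_create(tid, NULL, fn, arg);

                if (err == 0)
                        return 0;
                if (!transient_error(err) || attempt >= INIT_RETRY_LIMIT)
                        return err;

                /* linear backoff: 5 ms per failed attempt so far */
                struct timespec delay = {
                        .tv_sec  = 0,
                        .tv_nsec = attempt * 5 * 1000000L,
                };
                nanosleep(&delay, NULL);
        }
}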
807 clear_bit_unlock(0, &worker->create_state); in create_worker_cont()
808 wq = worker->wq; in create_worker_cont()
816 atomic_dec(&acct->nr_running); in create_worker_cont()
817 raw_spin_lock(&acct->workers_lock); in create_worker_cont()
818 acct->nr_workers--; in create_worker_cont()
819 if (!acct->nr_workers) { in create_worker_cont()
825 raw_spin_unlock(&acct->workers_lock); in create_worker_cont()
829 raw_spin_unlock(&acct->workers_lock); in create_worker_cont()
836 /* re-create attempts grab a new worker ref, drop the existing one */ in create_worker_cont()
861 atomic_dec(&acct->nr_running); in create_io_worker()
862 raw_spin_lock(&acct->workers_lock); in create_io_worker()
863 acct->nr_workers--; in create_io_worker()
864 raw_spin_unlock(&acct->workers_lock); in create_io_worker()
869 refcount_set(&worker->ref, 1); in create_io_worker()
870 worker->wq = wq; in create_io_worker()
871 worker->acct = acct; in create_io_worker()
872 raw_spin_lock_init(&worker->lock); in create_io_worker()
873 init_completion(&worker->ref_done); in create_io_worker()
882 INIT_DELAYED_WORK(&worker->work, io_workqueue_create); in create_io_worker()
900 list_for_each_entry_rcu(worker, &acct->all_list, all_list) { in io_acct_for_each_worker()
903 if (worker->task) in io_acct_for_each_worker()
919 if (!io_acct_for_each_worker(&wq->acct[i], func, data)) in io_wq_for_each_worker()
928 __set_notify_signal(worker->task); in io_wq_worker_wake()
929 wake_up_process(worker->task); in io_wq_worker_wake()
936 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in io_run_cancel()
937 wq->do_work(work); in io_run_cancel()
938 work = wq->free_work(work); in io_run_cancel()
950 wq_list_add_tail(&work->list, &acct->work_list); in io_wq_insert_work()
955 tail = wq->hash_tail[hash]; in io_wq_insert_work()
956 wq->hash_tail[hash] = work; in io_wq_insert_work()
960 wq_list_add_after(&work->list, &tail->list, &acct->work_list); in io_wq_insert_work()
970 unsigned int work_flags = atomic_read(&work->flags); in io_wq_enqueue()
980 * If io-wq is exiting for this task, or if the request has explicitly in io_wq_enqueue()
983 if (test_bit(IO_WQ_BIT_EXIT, &wq->state) || in io_wq_enqueue()
989 raw_spin_lock(&acct->lock); in io_wq_enqueue()
991 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags); in io_wq_enqueue()
992 raw_spin_unlock(&acct->lock); in io_wq_enqueue()
999 !atomic_read(&acct->nr_running))) { in io_wq_enqueue()
1006 raw_spin_lock(&acct->workers_lock); in io_wq_enqueue()
1007 if (acct->nr_workers) { in io_wq_enqueue()
1008 raw_spin_unlock(&acct->workers_lock); in io_wq_enqueue()
1011 raw_spin_unlock(&acct->workers_lock); in io_wq_enqueue()
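The io_wq_enqueue() lines above show the producer side: unless the wq is exiting (in which case the work is run as cancelled via io_run_cancel()), the item is appended to the acct work list under acct->lock, the stalled bit is cleared, and then a worker is woken or created; if creation fails and no worker exists at all, the just-queued item has to be pulled back out and cancelled. The userspace sketch below mirrors that ordering over a simple singly linked list; the list layout, run_cancel() helper and the externally provided ensure_worker_exists() (in the spirit of the earlier ensure_worker() sketch) are illustrative assumptions.

/* Userspace sketch: enqueue work, then make sure somebody will run it. */
#include <pthread.h>
#include <stdbool.h>

struct work_item {
        struct work_item *next;
        void (*fn)(struct work_item *);
        bool cancelled;
};

struct wq_acct {
        pthread_mutex_t   lock;                 /* like acct->lock */
        struct work_item *head, *tail;          /* like acct->work_list */
        bool              stalled;
        bool              exiting;              /* like IO_WQ_BIT_EXIT */
};

bool ensure_worker_exists(struct wq_acct *acct);        /* assumed helper */

static void run_cancel(struct work_item *work)
{
        work->cancelled = true;                 /* like io_run_cancel() */
        work->fn(work);
}

static void wq_enqueue(struct wq_acct *acct, struct work_item *work)
{
        if (acct->exiting) {
                run_cancel(work);               /* don't queue to a dying pool */
                return;
        }

        pthread_mutex_lock(&acct->lock);
        work->next = NULL;
        if (acct->tail)
                acct->tail->next = work;
        else
                acct->head = work;
        acct->tail = work;
        acct->stalled = false;                  /* new work may unblock a stalled acct */
        pthread_mutex_unlock(&acct->lock);

        if (!ensure_worker_exists(acct)) {
                /* nobody can run it: pull the item back off the list and cancel it */
                pthread_mutex_lock(&acct->lock);
                if (acct->tail == work) {
                        struct work_item *prev = acct->head;

                        if (prev == work) {
                                acct->head = acct->tail = NULL;
                        } else {
                                while (prev->next != work)
                                        prev = prev->next;
                                prev->next = NULL;
                                acct->tail = prev;
                        }
                }
                pthread_mutex_unlock(&acct->lock);
                run_cancel(work);
        }
}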
1027 atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags); in io_wq_hash_work()
1034 if (work && match->fn(work, match->data)) { in __io_wq_worker_cancel()
1035 atomic_or(IO_WQ_WORK_CANCEL, &work->flags); in __io_wq_worker_cancel()
1036 __set_notify_signal(worker->task); in __io_wq_worker_cancel()
1048 * Hold the lock to avoid ->cur_work going out of scope, caller in io_wq_worker_cancel()
1051 raw_spin_lock(&worker->lock); in io_wq_worker_cancel()
1052 if (__io_wq_worker_cancel(worker, match, worker->cur_work)) in io_wq_worker_cancel()
1053 match->nr_running++; in io_wq_worker_cancel()
1054 raw_spin_unlock(&worker->lock); in io_wq_worker_cancel()
1056 return match->nr_running && !match->cancel_all; in io_wq_worker_cancel()
1067 if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) { in io_wq_remove_pending()
1071 wq->hash_tail[hash] = prev_work; in io_wq_remove_pending()
1073 wq->hash_tail[hash] = NULL; in io_wq_remove_pending()
1075 wq_list_del(&acct->work_list, &work->list, prev); in io_wq_remove_pending()
1085 raw_spin_lock(&acct->lock); in io_acct_cancel_pending_work()
1086 wq_list_for_each(node, prev, &acct->work_list) { in io_acct_cancel_pending_work()
1088 if (!match->fn(work, match->data)) in io_acct_cancel_pending_work()
1091 raw_spin_unlock(&acct->lock); in io_acct_cancel_pending_work()
1093 match->nr_pending++; in io_acct_cancel_pending_work()
1097 raw_spin_unlock(&acct->lock); in io_acct_cancel_pending_work()
1111 if (match->cancel_all) in io_wq_cancel_pending_work()
1121 raw_spin_lock(&acct->workers_lock); in io_acct_cancel_running_work()
1123 raw_spin_unlock(&acct->workers_lock); in io_acct_cancel_running_work()
1132 io_acct_cancel_running_work(&wq->acct[i], match); in io_wq_cancel_running_work()
1148 * from there. CANCEL_OK means that the work is returned as-new, in io_wq_cancel_cb()
1151 * Then check if a free (going busy) or busy worker has the work in io_wq_cancel_cb()
1156 * Do both of these while holding the acct->workers_lock, to ensure that in io_wq_cancel_cb()
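The cancellation lines above split the job in two, as the io_wq_cancel_cb() comment says: pending entries are unlinked from the work list and completed as cancelled directly, while running entries are flagged IO_WQ_WORK_CANCEL and the worker is signalled; io_acct_cancel_pending_work() notably drops acct->lock to cancel an entry and then restarts the scan when cancel_all is set, because the list may have changed meanwhile. Below is a userspace sketch of the pending half of that scheme; match_fn, the list layout and cancel_item() are illustrative.

/* Userspace sketch: cancel queued work items that match a predicate. */
#include <pthread.h>
#include <stdbool.h>

struct pending {
        struct pending *next;
        int             key;            /* whatever the predicate matches on */
};

struct pending_list {
        pthread_mutex_t lock;           /* like acct->lock */
        struct pending *head;
};

typedef bool (*match_fn)(struct pending *item, void *data);

static void cancel_item(struct pending *item)
{
        (void)item;                     /* complete the request as cancelled, etc. */
}

static int cancel_pending(struct pending_list *pl, match_fn match, void *data,
                          bool cancel_all)
{
        int nr_cancelled = 0;

retry:
        pthread_mutex_lock(&pl->lock);
        for (struct pending **pp = &pl->head; *pp; pp = &(*pp)->next) {
                struct pending *item = *pp;

                if (!match(item, data))
                        continue;
                *pp = item->next;                       /* unlink under the lock */
                pthread_mutex_unlock(&pl->lock);
                cancel_item(item);                      /* never cancel under the lock */
                nr_cancelled++;
                if (cancel_all)
                        goto retry;                     /* list may have changed: rescan */
                return nr_cancelled;
        }
        pthread_mutex_unlock(&pl->lock);
        return nr_cancelled;
}

Restarting from the head after every drop-and-cancel is slower than continuing in place, but it is the simple way to stay correct once the lock has been released mid-walk.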
1174 static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode, in io_wq_hash_wake() argument
1177 struct io_wq *wq = container_of(wait, struct io_wq, wait); in io_wq_hash_wake()
1180 list_del_init(&wait->entry); in io_wq_hash_wake()
1184 struct io_wq_acct *acct = &wq->acct[i]; in io_wq_hash_wake()
1186 if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags)) in io_wq_hash_wake()
1198 if (WARN_ON_ONCE(!data->free_work || !data->do_work)) in io_wq_create()
1199 return ERR_PTR(-EINVAL); in io_wq_create()
1201 return ERR_PTR(-EINVAL); in io_wq_create()
1205 return ERR_PTR(-ENOMEM); in io_wq_create()
1207 refcount_inc(&data->hash->refs); in io_wq_create()
1208 wq->hash = data->hash; in io_wq_create()
1209 wq->free_work = data->free_work; in io_wq_create()
1210 wq->do_work = data->do_work; in io_wq_create()
1212 ret = -ENOMEM; in io_wq_create()
1214 if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL)) in io_wq_create()
1216 cpuset_cpus_allowed(data->task, wq->cpu_mask); in io_wq_create()
1217 wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded; in io_wq_create()
1218 wq->acct[IO_WQ_ACCT_UNBOUND].max_workers = in io_wq_create()
1220 INIT_LIST_HEAD(&wq->wait.entry); in io_wq_create()
1221 wq->wait.func = io_wq_hash_wake; in io_wq_create()
1223 struct io_wq_acct *acct = &wq->acct[i]; in io_wq_create()
1225 atomic_set(&acct->nr_running, 0); in io_wq_create()
1227 raw_spin_lock_init(&acct->workers_lock); in io_wq_create()
1228 INIT_HLIST_NULLS_HEAD(&acct->free_list, 0); in io_wq_create()
1229 INIT_LIST_HEAD(&acct->all_list); in io_wq_create()
1231 INIT_WQ_LIST(&acct->work_list); in io_wq_create()
1232 raw_spin_lock_init(&acct->lock); in io_wq_create()
1235 wq->task = get_task_struct(data->task); in io_wq_create()
1236 atomic_set(&wq->worker_refs, 1); in io_wq_create()
1237 init_completion(&wq->worker_done); in io_wq_create()
1238 ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node); in io_wq_create()
1244 io_wq_put_hash(data->hash); in io_wq_create()
1245 free_cpumask_var(wq->cpu_mask); in io_wq_create()
1254 if (cb->func != create_worker_cb && cb->func != create_worker_cont) in io_task_work_match()
1257 return worker->wq == data; in io_task_work_match()
1262 set_bit(IO_WQ_BIT_EXIT, &wq->state); in io_wq_exit_start()
1269 while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) { in io_wq_cancel_tw_create()
1278 if (cb->func == create_worker_cont) in io_wq_cancel_tw_create()
1285 if (!wq->task) in io_wq_exit_workers()
1294 wait_for_completion(&wq->worker_done); in io_wq_exit_workers()
1296 spin_lock_irq(&wq->hash->wait.lock); in io_wq_exit_workers()
1297 list_del_init(&wq->wait.entry); in io_wq_exit_workers()
1298 spin_unlock_irq(&wq->hash->wait.lock); in io_wq_exit_workers()
1300 put_task_struct(wq->task); in io_wq_exit_workers()
1301 wq->task = NULL; in io_wq_exit_workers()
1311 cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); in io_wq_destroy()
1313 free_cpumask_var(wq->cpu_mask); in io_wq_destroy()
1314 io_wq_put_hash(wq->hash); in io_wq_destroy()
1320 WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state)); in io_wq_put_and_exit()
1335 if (od->online) in io_wq_worker_affinity()
1336 cpumask_set_cpu(od->cpu, worker->wq->cpu_mask); in io_wq_worker_affinity()
1338 cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask); in io_wq_worker_affinity()
1374 if (!tctx || !tctx->io_wq) in io_wq_cpu_affinity()
1375 return -EINVAL; in io_wq_cpu_affinity()
1378 return -ENOMEM; in io_wq_cpu_affinity()
1381 cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask); in io_wq_cpu_affinity()
1384 cpumask_copy(tctx->io_wq->cpu_mask, mask); in io_wq_cpu_affinity()
1386 ret = -EINVAL; in io_wq_cpu_affinity()
1388 cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask); in io_wq_cpu_affinity()
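The io_wq_cpu_affinity() lines above accept a new worker CPU mask only if it is a subset of the cpuset-allowed mask (otherwise -EINVAL), and fall back to the allowed mask when no mask is given. The same check is easy to express in userspace with the glibc CPU_* macros; in the sketch below the "allowed" set is taken from the calling thread via sched_getaffinity(), which is an assumption for illustration, not what the kernel does (it queries the wq task's cpuset).

/* Userspace sketch: apply a CPU mask only if it is a subset of what's allowed. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>

static int apply_worker_mask(cpu_set_t *current_mask, cpu_set_t *requested)
{
        cpu_set_t allowed, overlap;

        /* what this thread may run on; stands in for cpuset_cpus_allowed() */
        if (sched_getaffinity(0, sizeof(allowed), &allowed) != 0)
                return -errno;

        if (!requested) {               /* no mask given: reset to everything allowed */
                *current_mask = allowed;
                return 0;
        }

        CPU_AND(&overlap, &allowed, requested);
        if (!CPU_EQUAL(&overlap, requested))
                return -EINVAL;         /* asked for CPUs we may not use */

        *current_mask = *requested;     /* like cpumask_copy() */
        return 0;
}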
1421 acct = &wq->acct[i]; in io_wq_max_workers()
1422 raw_spin_lock(&acct->workers_lock); in io_wq_max_workers()
1423 prev[i] = max_t(int, acct->max_workers, prev[i]); in io_wq_max_workers()
1425 acct->max_workers = new_count[i]; in io_wq_max_workers()
1426 raw_spin_unlock(&acct->workers_lock); in io_wq_max_workers()
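The io_wq_max_workers() lines above adjust the per-type worker caps under workers_lock, returning the previous values and only overwriting a cap when the new count is non-zero. A tiny userspace sketch of that exchange semantics follows; the struct and function names are illustrative.

/* Userspace sketch: update a worker cap under a lock, returning the old value. */
#include <pthread.h>

struct capped_acct {
        pthread_mutex_t workers_lock;   /* like acct->workers_lock */
        int             max_workers;    /* like acct->max_workers */
};

/* Pass 0 to only read the current cap, mirroring the "set only if non-zero" rule. */
static int exchange_max_workers(struct capped_acct *acct, int new_max)
{
        pthread_mutex_lock(&acct->workers_lock);
        int prev = acct->max_workers;
        if (new_max != 0)
                acct->max_workers = new_max;
        pthread_mutex_unlock(&acct->workers_lock);
        return prev;
}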
1440 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online", in io_wq_init()