Lines matching defs:q — each match below is a source line in the blk-mq core that declares or dereferences the request queue pointer q, prefixed with its source line number.

53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
115 static bool blk_freeze_set_owner(struct request_queue *q,
121 if (!q->mq_freeze_depth) {
122 q->mq_freeze_owner = owner;
123 q->mq_freeze_owner_depth = 1;
124 q->mq_freeze_disk_dead = !q->disk ||
125 test_bit(GD_DEAD, &q->disk->state) ||
126 !blk_queue_registered(q);
127 q->mq_freeze_queue_dying = blk_queue_dying(q);
131 if (owner == q->mq_freeze_owner)
132 q->mq_freeze_owner_depth += 1;
137 static bool blk_unfreeze_check_owner(struct request_queue *q)
139 if (q->mq_freeze_owner != current)
141 if (--q->mq_freeze_owner_depth == 0) {
142 q->mq_freeze_owner = NULL;
150 static bool blk_freeze_set_owner(struct request_queue *q,
156 static bool blk_unfreeze_check_owner(struct request_queue *q)
162 bool __blk_freeze_queue_start(struct request_queue *q,
167 mutex_lock(&q->mq_freeze_lock);
168 freeze = blk_freeze_set_owner(q, owner);
169 if (++q->mq_freeze_depth == 1) {
170 percpu_ref_kill(&q->q_usage_counter);
171 mutex_unlock(&q->mq_freeze_lock);
172 if (queue_is_mq(q))
173 blk_mq_run_hw_queues(q, false);
175 mutex_unlock(&q->mq_freeze_lock);
181 void blk_freeze_queue_start(struct request_queue *q)
183 if (__blk_freeze_queue_start(q, current))
184 blk_freeze_acquire_lock(q);
188 void blk_mq_freeze_queue_wait(struct request_queue *q)
190 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
194 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
197 return wait_event_timeout(q->mq_freeze_wq,
198 percpu_ref_is_zero(&q->q_usage_counter),
203 void blk_mq_freeze_queue_nomemsave(struct request_queue *q)
205 blk_freeze_queue_start(q);
206 blk_mq_freeze_queue_wait(q);
210 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
214 mutex_lock(&q->mq_freeze_lock);
216 q->q_usage_counter.data->force_atomic = true;
217 q->mq_freeze_depth--;
218 WARN_ON_ONCE(q->mq_freeze_depth < 0);
219 if (!q->mq_freeze_depth) {
220 percpu_ref_resurrect(&q->q_usage_counter);
221 wake_up_all(&q->mq_freeze_wq);
223 unfreeze = blk_unfreeze_check_owner(q);
224 mutex_unlock(&q->mq_freeze_lock);
229 void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q)
231 if (__blk_mq_unfreeze_queue(q, false))
232 blk_unfreeze_release_lock(q);
243 void blk_freeze_queue_start_non_owner(struct request_queue *q)
245 __blk_freeze_queue_start(q, NULL);
250 void blk_mq_unfreeze_queue_non_owner(struct request_queue *q)
252 __blk_mq_unfreeze_queue(q, false);
260 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
264 spin_lock_irqsave(&q->queue_lock, flags);
265 if (!q->quiesce_depth++)
266 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
267 spin_unlock_irqrestore(&q->queue_lock, flags);
291 * @q: request queue.
298 void blk_mq_quiesce_queue(struct request_queue *q)
300 blk_mq_quiesce_queue_nowait(q);
302 if (queue_is_mq(q))
303 blk_mq_wait_quiesce_done(q->tag_set);
309 * @q: request queue.
314 void blk_mq_unquiesce_queue(struct request_queue *q)
319 spin_lock_irqsave(&q->queue_lock, flags);
320 if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
322 } else if (!--q->quiesce_depth) {
323 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
326 spin_unlock_irqrestore(&q->queue_lock, flags);
330 blk_mq_run_hw_queues(q, true);
336 struct request_queue *q;
339 list_for_each_entry(q, &set->tag_list, tag_set_list) {
340 if (!blk_queue_skip_tagset_quiesce(q))
341 blk_mq_quiesce_queue_nowait(q);
351 struct request_queue *q;
354 list_for_each_entry(q, &set->tag_list, tag_set_list) {
355 if (!blk_queue_skip_tagset_quiesce(q))
356 blk_mq_unquiesce_queue(q);
362 void blk_mq_wake_waiters(struct request_queue *q)
367 queue_for_each_hw_ctx(q, hctx, i)
372 void blk_rq_init(struct request_queue *q, struct request *rq)
377 rq->q = q;
392 if (blk_queue_rq_alloc_time(rq->q))
404 struct request_queue *q = data->q;
407 rq->q = q;
440 struct elevator_queue *e = data->q->elevator;
479 percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
487 struct request_queue *q = data->q;
493 if (blk_queue_rq_alloc_time(q))
500 data->ctx = blk_mq_get_ctx(q);
503 if (q->elevator) {
516 struct elevator_mq_ops *ops = &q->elevator->type->ops;
569 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
575 .q = q,
587 if (blk_queue_enter(q, flags))
594 blk_queue_exit(q);
598 static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
611 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
616 if (!rq || rq->q != q)
633 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
638 rq = blk_mq_alloc_cached_request(q, opf, flags);
641 .q = q,
653 ret = blk_queue_enter(q, flags);
666 blk_queue_exit(q);
671 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
675 .q = q,
692 if (blk_queue_rq_alloc_time(q))
705 if (hctx_idx >= q->nr_hw_queues)
708 ret = blk_queue_enter(q, flags);
717 data.hctx = xa_load(&q->hctx_table, hctx_idx);
723 data.ctx = __blk_mq_get_ctx(q, cpu);
725 if (q->elevator)
747 blk_queue_exit(q);
754 struct request_queue *q = rq->q;
759 q->elevator->type->ops.finish_request(rq);
771 struct request_queue *q = rq->q;
787 blk_queue_exit(q);
792 struct request_queue *q = rq->q;
797 laptop_io_completion(q->disk->bdi);
799 rq_qos_done(q, rq);
818 rq->q->disk ? rq->q->disk->disk_name : "?",
846 req->q->disk ? req->q->disk->disk_name : "?",
951 !test_bit(GD_DEAD, &req->q->disk->state)) {
1068 if (!blk_queue_passthrough_stat(req->q))
1098 if (!blk_queue_io_stat(req->q))
1115 req->part = req->q->disk->part0;
1140 rq_qos_done(rq->q, rq);
1162 struct request_queue *q = hctx->queue;
1167 percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1190 rq_qos_done(rq->q, rq);
1226 rq->q->mq_ops->complete(rq);
1250 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1263 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1311 if (rq->q->nr_hw_queues == 1) {
1329 rq->q->mq_ops->complete(rq);
1343 struct request_queue *q = rq->q;
1347 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
1352 rq_qos_issue(q, rq);
1386 trace_block_plug(rq->q);
1388 (!blk_queue_nomerges(rq->q) &&
1392 trace_block_plug(rq->q);
1395 if (!plug->multiple_queues && last && last->q != rq->q)
1465 blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0);
1508 struct request_queue *q = rq->q;
1513 rq_qos_requeue(q, rq);
1523 struct request_queue *q = rq->q;
1531 spin_lock_irqsave(&q->requeue_lock, flags);
1532 list_add_tail(&rq->queuelist, &q->requeue_list);
1533 spin_unlock_irqrestore(&q->requeue_lock, flags);
1536 blk_mq_kick_requeue_list(q);
1542 struct request_queue *q =
1548 spin_lock_irq(&q->requeue_lock);
1549 list_splice_init(&q->requeue_list, &rq_list);
1550 list_splice_init(&q->flush_list, &flush_list);
1551 spin_unlock_irq(&q->requeue_lock);
1574 blk_mq_run_hw_queues(q, false);
1577 void blk_mq_kick_requeue_list(struct request_queue *q)
1579 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1583 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1586 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1608 if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
1620 bool blk_mq_queue_inflight(struct request_queue *q)
1624 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1632 if (req->q->mq_ops->timeout) {
1635 ret = req->q->mq_ops->timeout(req);
1709 struct request_queue *q =
1730 if (!percpu_ref_tryget(&q->q_usage_counter))
1734 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired);
1742 blk_mq_wait_quiesce_done(q->tag_set);
1745 blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
1749 mod_timer(&q->timeout, expired.next);
1757 queue_for_each_hw_ctx(q, hctx, i) {
1763 blk_queue_exit(q);
2024 budget_token = blk_mq_get_dispatch_budget(rq->q);
2046 blk_mq_put_dispatch_budget(rq->q, budget_token);
2055 static void blk_mq_release_budgets(struct request_queue *q,
2064 blk_mq_put_dispatch_budget(q, budget_token);
2092 struct request_queue *q = hctx->queue;
2120 ret = q->mq_ops->queue_rq(hctx, &bd);
2158 blk_mq_release_budgets(q, list);
2367 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2369 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2386 * @q: Pointer to the request queue to run.
2389 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2395 if (blk_queue_sq_sched(q))
2396 sq_hctx = blk_mq_get_sq_hctx(q);
2397 queue_for_each_hw_ctx(q, hctx, i) {
2414 * @q: Pointer to the request queue to run.
2417 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2423 if (blk_queue_sq_sched(q))
2424 sq_hctx = blk_mq_get_sq_hctx(q);
2425 queue_for_each_hw_ctx(q, hctx, i) {
2474 void blk_mq_stop_hw_queues(struct request_queue *q)
2479 queue_for_each_hw_ctx(q, hctx, i)
2492 void blk_mq_start_hw_queues(struct request_queue *q)
2497 queue_for_each_hw_ctx(q, hctx, i)
2518 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2523 queue_for_each_hw_ctx(q, hctx, i)
2597 struct request_queue *q = rq->q;
2636 } else if (q->elevator) {
2642 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2670 rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
2683 struct request_queue *q = rq->q;
2695 ret = q->mq_ops->queue_rq(hctx, &bd);
2717 budget_token = blk_mq_get_dispatch_budget(rq->q);
2722 blk_mq_put_dispatch_budget(rq->q, budget_token);
2743 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2774 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
2824 static void __blk_mq_flush_list(struct request_queue *q, struct rq_list *rqs)
2826 if (blk_queue_quiesced(q))
2828 q->mq_ops->queue_rqs(rqs);
2835 struct request_queue *this_q = rq->q;
2843 if (rq->q == this_q) {
2862 struct request_queue *q = rq_list_peek(rqs)->q;
2864 trace_block_unplug(q, depth, true);
2872 if (q->mq_ops->queue_rqs) {
2873 blk_mq_run_dispatch_ops(q, __blk_mq_flush_list(q, rqs));
2878 blk_mq_run_dispatch_ops(q, blk_mq_issue_direct(rqs));
3004 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
3007 if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
3008 if (blk_attempt_plug_merge(q, bio, nr_segs))
3010 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
3016 static struct request *blk_mq_get_new_requests(struct request_queue *q,
3021 .q = q,
3033 rq_qos_throttle(q, bio);
3043 rq_qos_cleanup(q, bio);
3051 struct request_queue *q, blk_opf_t opf)
3059 if (!rq || rq->q != q)
3080 rq_qos_throttle(rq->q, bio);
3087 static bool bio_unaligned(const struct bio *bio, struct request_queue *q)
3089 unsigned int bs_mask = queue_logical_block_size(q) - 1;
3102 * Builds up a request structure from @q and @bio and send to the device. The
3113 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
3124 rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf);
3135 blk_queue_exit(q);
3153 if (unlikely(bio_unaligned(bio, q))) {
3158 if ((bio->bi_opf & REQ_POLLED) && !blk_mq_can_poll(q)) {
3164 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
3171 if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
3183 rq = blk_mq_get_new_requests(q, plug, bio);
3193 rq_qos_track(q, rq, bio);
3218 (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
3222 blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
3232 blk_queue_exit(q);
3242 struct request_queue *q = rq->q;
3277 if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
3291 blk_mq_run_dispatch_ops(q,
3346 struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
3861 static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
3865 spin_lock(&q->unused_hctx_lock);
3866 list_splice_init(&q->unused_hctx_list, &hctx_list);
3867 spin_unlock(&q->unused_hctx_lock);
3873 spin_lock(&q->unused_hctx_lock);
3874 list_splice(&hctx_list, &q->unused_hctx_list);
3875 spin_unlock(&q->unused_hctx_lock);
3883 static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
3889 queue_for_each_hw_ctx(q, hctx, i)
3924 static void blk_mq_exit_hctx(struct request_queue *q,
3933 if (blk_queue_init_done(q))
3942 xa_erase(&q->hctx_table, hctx_idx);
3944 spin_lock(&q->unused_hctx_lock);
3945 list_add(&hctx->hctx_list, &q->unused_hctx_list);
3946 spin_unlock(&q->unused_hctx_lock);
3949 static void blk_mq_exit_hw_queues(struct request_queue *q,
3955 queue_for_each_hw_ctx(q, hctx, i) {
3959 blk_mq_exit_hctx(q, set, hctx, i);
3963 static int blk_mq_init_hctx(struct request_queue *q,
3979 if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3995 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
4018 hctx->queue = q;
4061 static void blk_mq_init_cpu_queues(struct request_queue *q,
4064 struct blk_mq_tag_set *set = q->tag_set;
4068 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
4077 __ctx->queue = q;
4084 hctx = blk_mq_map_queue_type(q, j, i);
4145 static void blk_mq_map_swqueue(struct request_queue *q)
4151 struct blk_mq_tag_set *set = q->tag_set;
4153 queue_for_each_hw_ctx(q, hctx, i) {
4166 ctx = per_cpu_ptr(q->queue_ctx, i);
4169 ctx->hctxs[j] = blk_mq_map_queue_type(q,
4186 hctx = blk_mq_map_queue_type(q, j, i);
4209 ctx->hctxs[j] = blk_mq_map_queue_type(q,
4213 queue_for_each_hw_ctx(q, hctx, i) {
4263 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
4268 queue_for_each_hw_ctx(q, hctx, i) {
4281 struct request_queue *q;
4286 list_for_each_entry(q, &set->tag_list, tag_set_list) {
4287 memflags = blk_mq_freeze_queue(q);
4288 queue_set_hctx_shared(q, shared);
4289 blk_mq_unfreeze_queue(q, memflags);
4293 static void blk_mq_del_queue_tag_set(struct request_queue *q)
4295 struct blk_mq_tag_set *set = q->tag_set;
4298 list_del(&q->tag_set_list);
4306 INIT_LIST_HEAD(&q->tag_set_list);
4310 struct request_queue *q)
4324 queue_set_hctx_shared(q, true);
4325 list_add_tail(&q->tag_set_list, &set->tag_list);
4330 /* All allocations will be freed in release handler of q->mq_kobj */
4331 static int blk_mq_alloc_ctxs(struct request_queue *q)
4349 q->mq_kobj = &ctxs->kobj;
4350 q->queue_ctx = ctxs->queue_ctx;
4361 * and headache because q->mq_kobj shouldn't have been introduced,
4364 void blk_mq_release(struct request_queue *q)
4369 queue_for_each_hw_ctx(q, hctx, i)
4373 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
4378 xa_destroy(&q->hctx_table);
4384 blk_mq_sysfs_deinit(q);
4391 struct request_queue *q;
4400 q = blk_alloc_queue(lim, set->numa_node);
4401 if (IS_ERR(q))
4402 return q;
4403 q->queuedata = queuedata;
4404 ret = blk_mq_init_allocated_queue(set, q);
4406 blk_put_queue(q);
4409 return q;
4415 * @q: request queue to shutdown
4423 void blk_mq_destroy_queue(struct request_queue *q)
4425 WARN_ON_ONCE(!queue_is_mq(q));
4426 WARN_ON_ONCE(blk_queue_registered(q));
4430 blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4431 blk_queue_start_drain(q);
4432 blk_mq_freeze_queue_wait(q);
4434 blk_sync_queue(q);
4435 blk_mq_cancel_work_sync(q);
4436 blk_mq_exit_queue(q);
4444 struct request_queue *q;
4447 q = blk_mq_alloc_queue(set, lim, queuedata);
4448 if (IS_ERR(q))
4449 return ERR_CAST(q);
4451 disk = __alloc_disk_node(q, set->numa_node, lkclass);
4453 blk_mq_destroy_queue(q);
4454 blk_put_queue(q);
4462 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4467 if (!blk_get_queue(q))
4469 disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4471 blk_put_queue(q);
4486 struct blk_mq_tag_set *set, struct request_queue *q,
4492 spin_lock(&q->unused_hctx_lock);
4493 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4501 spin_unlock(&q->unused_hctx_lock);
4504 hctx = blk_mq_alloc_hctx(q, set, node);
4508 if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4520 struct request_queue *q)
4528 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4532 blk_mq_exit_hctx(q, set, old_hctx, i);
4535 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4540 hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4546 * hctxs and keep the previous q->nr_hw_queues.
4549 j = q->nr_hw_queues;
4552 q->nr_hw_queues = set->nr_hw_queues;
4555 xa_for_each_start(&q->hctx_table, j, hctx, j)
4556 blk_mq_exit_hctx(q, set, hctx, j);
4560 struct request_queue *q)
4562 __blk_mq_realloc_hw_ctxs(set, q);
4565 blk_mq_remove_hw_queues_cpuhp(q);
4568 blk_mq_add_hw_queues_cpuhp(q);
4572 struct request_queue *q)
4575 q->mq_ops = set->ops;
4581 q->tag_set = set;
4583 if (blk_mq_alloc_ctxs(q))
4586 /* init q->mq_kobj and sw queues' kobjects */
4587 blk_mq_sysfs_init(q);
4589 INIT_LIST_HEAD(&q->unused_hctx_list);
4590 spin_lock_init(&q->unused_hctx_lock);
4592 xa_init(&q->hctx_table);
4594 blk_mq_realloc_hw_ctxs(set, q);
4595 if (!q->nr_hw_queues)
4598 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4599 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4601 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4603 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4604 INIT_LIST_HEAD(&q->flush_list);
4605 INIT_LIST_HEAD(&q->requeue_list);
4606 spin_lock_init(&q->requeue_lock);
4608 q->nr_requests = set->queue_depth;
4610 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4611 blk_mq_map_swqueue(q);
4612 blk_mq_add_queue_tag_set(set, q);
4616 blk_mq_release(q);
4618 q->mq_ops = NULL;
4624 void blk_mq_exit_queue(struct request_queue *q)
4626 struct blk_mq_tag_set *set = q->tag_set;
4629 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4631 blk_mq_del_queue_tag_set(q);
4920 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4922 struct blk_mq_tag_set *set = q->tag_set;
4927 if (WARN_ON_ONCE(!q->mq_freeze_depth))
4933 if (q->nr_requests == nr)
4936 blk_mq_quiesce_queue(q);
4939 queue_for_each_hw_ctx(q, hctx, i) {
4955 if (q->elevator && q->elevator->type->ops.depth_updated)
4956 q->elevator->type->ops.depth_updated(hctx);
4959 q->nr_requests = nr;
4961 if (q->elevator)
4962 blk_mq_tag_update_sched_shared_tags(q);
4968 blk_mq_unquiesce_queue(q);
4976 static void blk_mq_elv_switch_back(struct request_queue *q,
4979 struct elevator_type *e = xa_load(elv_tbl, q->id);
4980 struct elevator_tags *t = xa_load(et_tbl, q->id);
4983 elv_update_nr_hw_queues(q, e, t);
4992 * q->id as an index to store the elevator type into the xarray.
4994 static int blk_mq_elv_switch_none(struct request_queue *q,
4999 lockdep_assert_held_write(&q->tag_set->update_nr_hwq_lock);
5002 * Accessing q->elevator without holding q->elevator_lock is safe here
5008 if (q->elevator) {
5010 ret = xa_insert(elv_tbl, q->id, q->elevator->type, GFP_KERNEL);
5021 __elevator_get(q->elevator->type);
5023 elevator_set_none(q);
5031 struct request_queue *q;
5054 list_for_each_entry(q, &set->tag_list, tag_set_list) {
5055 blk_mq_debugfs_unregister_hctxs(q);
5056 blk_mq_sysfs_unregister_hctxs(q);
5059 list_for_each_entry(q, &set->tag_list, tag_set_list)
5060 blk_mq_freeze_queue_nomemsave(q);
5067 list_for_each_entry(q, &set->tag_list, tag_set_list)
5068 if (blk_mq_elv_switch_none(q, &elv_tbl))
5076 list_for_each_entry(q, &set->tag_list, tag_set_list) {
5077 __blk_mq_realloc_hw_ctxs(set, q);
5079 if (q->nr_hw_queues != set->nr_hw_queues) {
5090 blk_mq_map_swqueue(q);
5094 list_for_each_entry(q, &set->tag_list, tag_set_list)
5095 blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
5097 list_for_each_entry(q, &set->tag_list, tag_set_list) {
5098 blk_mq_sysfs_register_hctxs(q);
5099 blk_mq_debugfs_register_hctxs(q);
5101 blk_mq_remove_hw_queues_cpuhp(q);
5102 blk_mq_add_hw_queues_cpuhp(q);
5125 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
5132 ret = q->mq_ops->poll(hctx, iob);
5152 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
5155 if (!blk_mq_can_poll(q))
5157 return blk_hctx_poll(q, xa_load(&q->hctx_table, cookie), iob, flags);
5163 struct request_queue *q = rq->q;
5168 if (!percpu_ref_tryget(&q->q_usage_counter))
5171 ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags);
5172 blk_queue_exit(q);
5184 void blk_mq_cancel_work_sync(struct request_queue *q)
5189 cancel_delayed_work_sync(&q->requeue_work);
5191 queue_for_each_hw_ctx(q, hctx, i)
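
The matches above are dominated by the request_queue freeze/unfreeze and quiesce/unquiesce entry points of the blk-mq core. As an illustration only (not part of the listing), the sketch below shows how a driver might typically pair those calls; the struct my_dev and my_dev_reconfigure() names are hypothetical, while the blk-mq signatures follow the ones visible in the matches themselves (e.g. around source lines 4287-4289, where blk_mq_freeze_queue() returns memalloc flags that are later handed back to blk_mq_unfreeze_queue()).

/*
 * Illustrative sketch, not taken from the listing: a hypothetical driver
 * ("my_dev") freezing and quiescing its queue around a reconfiguration.
 */
#include <linux/blk-mq.h>

struct my_dev {
	struct request_queue *q;	/* hypothetical driver state */
};

static void my_dev_reconfigure(struct my_dev *dev)
{
	unsigned int memflags;

	/* Block new queue entry and wait for q_usage_counter to drain. */
	memflags = blk_mq_freeze_queue(dev->q);

	/* Additionally stop ->queue_rq() dispatch while driver state changes. */
	blk_mq_quiesce_queue(dev->q);

	/* ... update driver/hardware state here ... */

	blk_mq_unquiesce_queue(dev->q);
	blk_mq_unfreeze_queue(dev->q, memflags);
}

The freeze pair blocks queue entry and drains q_usage_counter, while the quiesce pair only stops hardware queue dispatch; the core itself combines the two in the same way for depth updates (see the blk_mq_update_nr_requests matches around source lines 4920-4968).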