Lines Matching defs:q (block/blk-core.c, the request queue pointer q)
78 * @q: request queue
80 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
82 set_bit(flag, &q->queue_flags);
89 * @q: request queue
91 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
93 clear_bit(flag, &q->queue_flags);
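These two helpers are the atomic accessors for q->queue_flags. A minimal sketch of a caller, assuming the driver already holds a queue reference; QUEUE_FLAG_NOMERGES is used purely as an illustrative flag:

#include <linux/blkdev.h>

/* Sketch: atomically toggle a queue flag. The flag choice is illustrative. */
static void example_toggle_nomerges(struct request_queue *q, bool disable)
{
	if (disable)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
}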
204 * @q: the queue
220 void blk_sync_queue(struct request_queue *q)
222 timer_delete_sync(&q->timeout);
223 cancel_work_sync(&q->timeout_work);
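blk_sync_queue() cancels the request timeout timer and waits for the timeout work to finish. A sketch of a driver teardown path, assuming a hypothetical my_dev structure and that the queue is quiesced first:

#include <linux/blk-mq.h>

struct my_dev { struct request_queue *queue; };	/* hypothetical driver state */

/* Sketch: make sure no timeout handling is still running before the driver
 * frees state that the timeout path might dereference. */
static void example_teardown(struct my_dev *dev)
{
	blk_mq_quiesce_queue(dev->queue);	/* assumed: stop new dispatches first */
	blk_sync_queue(dev->queue);		/* cancel timer, flush timeout work */
}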
229 * @q: request queue pointer
231 void blk_set_pm_only(struct request_queue *q)
233 atomic_inc(&q->pm_only);
237 void blk_clear_pm_only(struct request_queue *q)
241 pm_only = atomic_dec_return(&q->pm_only);
244 wake_up_all(&q->mq_freeze_wq);
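The pm_only counter gates blk_queue_enter(): while it is non-zero only BLK_MQ_REQ_PM callers get through, and the final blk_clear_pm_only() wakes anyone sleeping on mq_freeze_wq. A sketch of a runtime-PM style pairing, loosely modelled on how the block runtime-PM helpers use these functions:

/* Sketch: block normal I/O while the device is runtime suspended. */
static void example_runtime_suspend(struct request_queue *q)
{
	blk_set_pm_only(q);
	/* ... only BLK_MQ_REQ_PM requests enter the queue from here on ... */
}

static void example_runtime_resume(struct request_queue *q)
{
	blk_clear_pm_only(q);	/* last clear wakes waiters in blk_queue_enter() */
}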
250 struct request_queue *q = container_of(rcu_head,
253 percpu_ref_exit(&q->q_usage_counter);
254 kmem_cache_free(blk_requestq_cachep, q);
257 static void blk_free_queue(struct request_queue *q)
259 blk_free_queue_stats(q->stats);
260 if (queue_is_mq(q))
261 blk_mq_release(q);
263 ida_free(&blk_queue_ida, q->id);
264 lockdep_unregister_key(&q->io_lock_cls_key);
265 lockdep_unregister_key(&q->q_lock_cls_key);
266 call_rcu(&q->rcu_head, blk_free_queue_rcu);
271 * @q: the request_queue structure to decrement the refcount for
276 void blk_put_queue(struct request_queue *q)
278 if (refcount_dec_and_test(&q->refs))
279 blk_free_queue(q);
283 bool blk_queue_start_drain(struct request_queue *q)
290 bool freeze = __blk_freeze_queue_start(q, current);
291 if (queue_is_mq(q))
292 blk_mq_wake_waiters(q);
294 wake_up_all(&q->mq_freeze_wq);
300 * blk_queue_enter() - try to increase q->q_usage_counter
301 * @q: request queue pointer
304 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
308 while (!blk_try_enter_queue(q, pm)) {
320 wait_event(q->mq_freeze_wq,
321 (!q->mq_freeze_depth &&
322 blk_pm_resume_queue(pm, q)) ||
323 blk_queue_dying(q));
324 if (blk_queue_dying(q))
328 rwsem_acquire_read(&q->q_lockdep_map, 0, 0, _RET_IP_);
329 rwsem_release(&q->q_lockdep_map, _RET_IP_);
333 int __bio_queue_enter(struct request_queue *q, struct bio *bio)
335 while (!blk_try_enter_queue(q, false)) {
353 wait_event(q->mq_freeze_wq,
354 (!q->mq_freeze_depth &&
355 blk_pm_resume_queue(false, q)) ||
361 rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
362 rwsem_release(&q->io_lockdep_map, _RET_IP_);
369 void blk_queue_exit(struct request_queue *q)
371 percpu_ref_put(&q->q_usage_counter);
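blk_queue_enter() and blk_queue_exit() bracket a reference on q->q_usage_counter, so the queue cannot be fully frozen or torn down in between. A minimal sketch of a caller, assuming no special PM handling is needed:

#include <linux/blk-mq.h>

/* Sketch: pin the queue usage counter around work that must not race with
 * a queue freeze. Pass BLK_MQ_REQ_NOWAIT instead of 0 to avoid sleeping. */
static int example_do_work(struct request_queue *q)
{
	int ret;

	ret = blk_queue_enter(q, 0);
	if (ret)
		return ret;	/* queue is dying, or NOWAIT entry was refused */

	/* ... safe to issue work against the queue here ... */

	blk_queue_exit(q);
	return 0;
}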
376 struct request_queue *q =
379 wake_up_all(&q->mq_freeze_wq);
384 struct request_queue *q = timer_container_of(q, t, timeout);
386 kblockd_schedule_work(&q->timeout_work);
395 struct request_queue *q;
398 q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
400 if (!q)
403 q->last_merge = NULL;
405 q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
406 if (q->id < 0) {
407 error = q->id;
411 q->stats = blk_alloc_queue_stats();
412 if (!q->stats) {
420 q->limits = *lim;
422 q->node = node_id;
424 atomic_set(&q->nr_active_requests_shared_tags, 0);
426 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
427 INIT_WORK(&q->timeout_work, blk_timeout_work);
428 INIT_LIST_HEAD(&q->icq_list);
430 refcount_set(&q->refs, 1);
431 mutex_init(&q->debugfs_mutex);
432 mutex_init(&q->elevator_lock);
433 mutex_init(&q->sysfs_lock);
434 mutex_init(&q->limits_lock);
435 mutex_init(&q->rq_qos_mutex);
436 spin_lock_init(&q->queue_lock);
438 init_waitqueue_head(&q->mq_freeze_wq);
439 mutex_init(&q->mq_freeze_lock);
441 blkg_init_queue(q);
447 error = percpu_ref_init(&q->q_usage_counter,
452 lockdep_register_key(&q->io_lock_cls_key);
453 lockdep_register_key(&q->q_lock_cls_key);
454 lockdep_init_map(&q->io_lockdep_map, "&q->q_usage_counter(io)",
455 &q->io_lock_cls_key, 0);
456 lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
457 &q->q_lock_cls_key, 0);
461 rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
462 rwsem_release(&q->io_lockdep_map, _RET_IP_);
465 q->nr_requests = BLKDEV_DEFAULT_RQ;
467 return q;
470 blk_free_queue_stats(q->stats);
472 ida_free(&blk_queue_ida, q->id);
474 kmem_cache_free(blk_requestq_cachep, q);
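blk_alloc_queue() itself is internal; a blk-mq driver normally reaches it through blk_mq_alloc_disk(), handing over its initial queue_limits. A sketch under that assumption, with purely illustrative limit values and a tag set presumed to be set up already:

#include <linux/blk-mq.h>

/* Sketch: allocate a disk (and with it a request queue) from a prepared
 * tag set. Returns an ERR_PTR() on failure. */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,	/* illustrative values only */
		.max_hw_sectors		= 256,
	};

	return blk_mq_alloc_disk(set, &lim, NULL);
}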
480 * @q: the request_queue structure to increment the refcount for
486 bool blk_get_queue(struct request_queue *q)
488 if (unlikely(blk_queue_dying(q)))
490 refcount_inc(&q->refs);
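blk_get_queue() and blk_put_queue() manage the structural refcount on the request_queue (q->refs), which is separate from the q_usage_counter taken by blk_queue_enter(). A minimal sketch of the pairing:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Sketch: hold a structural reference on a queue that may be going away.
 * blk_get_queue() fails once the queue has been marked dying. */
static int example_hold_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))
		return -ENODEV;

	/* ... q stays allocated here, even if the device disappears ... */

	blk_put_queue(q);	/* final reference frees the queue via RCU */
	return 0;
}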
594 static inline blk_status_t blk_check_zone_append(struct request_queue *q,
612 if (nr_sectors > q->limits.chunk_sectors)
616 if (nr_sectors > q->limits.max_zone_append_sectors)
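The two checks above bound a zone-append bio: it must fit inside a single zone (chunk_sectors) and must not exceed the advertised max_zone_append_sectors. A small sketch that mirrors those limits for a proposed size:

/* Sketch: mirror the zone-append size checks performed above. */
static bool example_zone_append_size_ok(struct request_queue *q,
					unsigned int nr_sectors)
{
	return nr_sectors <= q->limits.chunk_sectors &&
	       nr_sectors <= q->limits.max_zone_append_sectors;
}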
681 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
699 if (q == bdev_get_queue(bio->bi_bdev))
756 static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
759 if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
762 if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
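An atomic write bio must be no larger than the queue's atomic write unit maximum and a multiple of its minimum. A sketch of a submitter-side size check; bdev_can_atomic_write() is assumed here from the block layer headers rather than shown in this listing:

#include <linux/blkdev.h>

/* Sketch: validate a proposed atomic write size against the same limits
 * that blk_validate_atomic_write_op_size() enforces at submission time. */
static bool example_atomic_write_size_ok(struct block_device *bdev,
					 unsigned int bytes)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!bdev_can_atomic_write(bdev))
		return false;
	if (bytes > queue_atomic_write_unit_max_bytes(q))
		return false;
	return bytes % queue_atomic_write_unit_min_bytes(q) == 0;
}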
780 struct request_queue *q = bdev_get_queue(bdev);
825 status = blk_validate_atomic_write_op_size(q, bio);
845 status = blk_check_zone_append(q, bio);
850 if (!q->limits.max_write_zeroes_sectors)
936 struct request_queue *q;
943 q = bdev_get_queue(bdev);
958 if (!percpu_ref_tryget(&q->q_usage_counter))
960 if (queue_is_mq(q)) {
961 ret = blk_mq_poll(q, cookie, iob, flags);
963 struct gendisk *disk = q->disk;
965 if ((q->limits.features & BLK_FEAT_POLL) && disk &&
969 blk_queue_exit(q);
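bio_poll() drives completion of a polled bio by spinning on the hardware queue instead of waiting for an interrupt. A sketch of a synchronous polled submission, loosely modelled on the block layer's direct-I/O fast path; the done flag is assumed to be set by the bio's end_io callback:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

/* Sketch: submit a polled bio and spin in bio_poll() until it completes. */
static void example_poll_wait(struct bio *bio, bool *done)
{
	bio->bi_opf |= REQ_POLLED;
	submit_bio(bio);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		if (!bio_poll(bio, NULL, 0))
			blk_io_schedule();	/* polling unavailable, sleep instead */
	}
	__set_current_state(TASK_RUNNING);
}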
1080 * @q : the queue of the device being checked
1097 int blk_lld_busy(struct request_queue *q)
1099 if (queue_is_mq(q) && q->mq_ops->busy)
1100 return q->mq_ops->busy(q);
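blk_lld_busy() simply forwards to the low-level driver's ->busy() hook when one exists. A sketch of the kind of caller it is meant for, in the spirit of dm-multipath deciding whether to delay dispatching to a path:

/* Sketch: ask the low-level driver whether it is currently congested. */
static bool example_should_delay_dispatch(struct request_queue *q)
{
	return blk_lld_busy(q) != 0;
}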