Lines matching +refs:queue +refs:get +refs:op in drivers/block/ublk_drv.c
254 blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue); in ublk_dev_param_zoned_apply()
255 blk_queue_required_elevator_features(ub->ub_disk->queue, in ublk_dev_param_zoned_apply()
259 blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors); in ublk_dev_param_zoned_apply()
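The three hits above (254-259) are the zoned-parameter apply path. A condensed sketch of how they fit together in ublk_dev_param_zoned_apply(), assuming the conventional zoned-device setup around them; the `p` declaration and the ELEVATOR_F_ZBD_SEQ_WRITE argument are inferred, not shown in the hits:

    /* Sketch: apply user-supplied zoned limits to the request queue.
     * Only the three calls matching the search are confirmed by the
     * hits; the elevator feature constant is the one conventionally
     * required for sequential-write-required zoned devices. */
    static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
    {
            const struct ublk_param_zoned *p = &ub->params.zoned;

            blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
            blk_queue_required_elevator_features(ub->ub_disk->queue,
                                                 ELEVATOR_F_ZBD_SEQ_WRITE);
            blk_queue_max_zone_append_sectors(ub->ub_disk->queue,
                                              p->max_zone_append_sectors);
            return 0;
    }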
270 struct request_queue *q = ublk->ub_disk->queue; in ublk_alloc_report_buffer()
298 unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors; in ublk_report_zones()
325 req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0); in ublk_report_zones()
336 ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length, in ublk_report_zones()
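Hits 270-336 form the zone-report path: ublk_alloc_report_buffer() sizes its buffer from the queue limits (270), and ublk_report_zones() reads the zone size from chunk_sectors (298) before issuing a driver-private read. A hedged sketch of the per-batch request setup; only the calls on 325 and 336 are confirmed, the execution and cleanup around them are assumptions:

    /* Sketch: one zone-report pass using a passthrough request. */
    req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
    if (IS_ERR(req))
            return PTR_ERR(req);

    ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
                          GFP_KERNEL);
    if (ret)
            goto out;

    /* ... fill the ublk IOD, execute the request, then hand each
     * returned zone descriptor to the report_zones callback ... */
    out:
            blk_mq_free_request(req);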
498 struct request_queue *q = ub->ub_disk->queue; in ublk_dev_param_basic_apply()
525 struct request_queue *q = ub->ub_disk->queue; in ublk_dev_param_discard_apply()
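Lines 498 and 525 show the same opening move in the other two apply helpers: fetch the request_queue from the gendisk, then program limits from the corresponding parameter block. A minimal hedged sketch of the discard case; the limit setters are the standard block-layer ones, and only the queue lookup is confirmed by the hit:

    static void ublk_dev_param_discard_apply(struct ublk_device *ub)
    {
            struct request_queue *q = ub->ub_disk->queue;
            const struct ublk_param_discard *p = &ub->params.discard;

            /* Assumed setters beyond the confirmed queue lookup. */
            q->limits.discard_granularity = p->discard_granularity;
            blk_queue_max_discard_sectors(q, p->max_discard_sectors);
            blk_queue_max_write_zeroes_sectors(q,
                                               p->max_write_zeroes_sectors);
    }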
957 enum req_op op = req_op(req); in ublk_setup_iod() local
961 (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND)) in ublk_setup_iod()
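In ublk_setup_iod(), the local `op` from line 957 feeds the guard on line 961, which pairs op_is_zone_mgmt() with an explicit REQ_OP_ZONE_APPEND check because zone append is a data op, not a zone-management op. A sketch of the guard; the ublk_queue_is_zoned() predicate and the returned status are assumptions beyond the hit:

    enum req_op op = req_op(req);

    /* Zoned-only operations must be rejected on non-zoned devices;
     * REQ_OP_ZONE_APPEND is checked separately since it is not
     * covered by op_is_zone_mgmt(). */
    if (!ublk_queue_is_zoned(ubq) &&
        (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
            return BLK_STS_IOERR;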
1449 blk_mq_quiesce_queue(disk->queue); in ublk_abort_requests()
1452 blk_mq_unquiesce_queue(disk->queue); in ublk_abort_requests()
1552 WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue)); in ublk_wait_tagset_rqs_idle()
1569 blk_mq_quiesce_queue(ub->ub_disk->queue); in __ublk_quiesce_dev()
1604 blk_mq_unquiesce_queue(ub->ub_disk->queue); in ublk_unquiesce_dev()
1606 blk_mq_kick_requeue_list(ub->ub_disk->queue); in ublk_unquiesce_dev()
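Hits 1449-1606 trace the quiesce lifecycle: ublk_abort_requests() quiesces briefly around aborting (1449/1452), __ublk_quiesce_dev() quiesces and then waits for the tagset to drain, and the WARN on line 1552 documents the invariant that the wait only runs on an already-quiesced queue. A sketch of that wait, assuming a busy-iterating callback; the helper name and delay constant follow ublk naming convention but are not confirmed by the hits:

    static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
    {
            bool idle;

            WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
            do {
                    idle = true;
                    /* Assumed callback: clears 'idle' when it finds
                     * a request still in flight. */
                    blk_mq_tagset_busy_iter(&ub->tag_set,
                                            ublk_check_inflight_rq,
                                            &idle);
                    if (!idle)
                            msleep(UBLK_REQUEUE_DELAY_MS);
            } while (!idle);
    }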
2279 unsigned long queue; in ublk_ctrl_get_queue_affinity() local
2291 queue = header->data[0]; in ublk_ctrl_get_queue_affinity()
2292 if (queue >= ub->dev_info.nr_hw_queues) in ublk_ctrl_get_queue_affinity()
2299 if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue) in ublk_ctrl_get_queue_affinity()
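ublk_ctrl_get_queue_affinity() pulls the queue index out of the control header (2291), rejects out-of-range indices (2292), then walks the default blk-mq CPU map collecting every CPU routed to that hardware queue (2299). A hedged sketch; the cpumask handling and copy-out are assumptions around the three confirmed lines:

    unsigned long queue = header->data[0];
    cpumask_var_t cpumask; /* assumed: allocated with zalloc_cpumask_var() */
    int i;

    if (queue >= ub->dev_info.nr_hw_queues)
            return -EINVAL;

    /* Collect every CPU whose default mapping points at this queue. */
    for_each_possible_cpu(i) {
            if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
                    cpumask_set_cpu(i, cpumask);
    }
    /* ... copy the mask out to the caller-supplied buffer ... */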
2728 blk_mq_unquiesce_queue(ub->ub_disk->queue); in ublk_ctrl_end_recovery()
2731 blk_mq_kick_requeue_list(ub->ub_disk->queue); in ublk_ctrl_end_recovery()
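The pair on 2728/2731 in ublk_ctrl_end_recovery() mirrors 1604/1606 in ublk_unquiesce_dev(): unquiesce so blk-mq may dispatch again, then kick the requeue list so requests parked there while the queue was quiesced get re-dispatched. The ordering rationale in the comments below is inferred from the call pair, not stated in the hits:

    /* Resume dispatch first ... */
    blk_mq_unquiesce_queue(ub->ub_disk->queue);
    /* ... then push requests requeued during quiesce back into the
     * dispatch path so the (new) daemon sees them. */
    blk_mq_kick_requeue_list(ub->ub_disk->queue);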
3015 .get = ublk_get_max_ublks,
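The final hit is a kernel_param_ops .get callback, i.e. the read side of a module parameter with validated set/get handlers. A sketch of the likely wiring; the getter body, companion setter, ops struct, and parameter name all follow the obvious ublk convention but are assumptions beyond the one confirmed line:

    static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
    {
            /* Assumed body: report the current device-count limit. */
            return sysfs_emit(buf, "%u\n", ublks_max);
    }

    static const struct kernel_param_ops ublk_max_ublks_ops = {
            .set = ublk_set_max_ublks, /* assumed companion setter */
            .get = ublk_get_max_ublks,
    };

    module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);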