/linux/drivers/target/
target_core_tpg.c
    158  struct se_node_acl *acl, u32 queue_depth)  in target_set_nacl_queue_depth() argument
    160  acl->queue_depth = queue_depth;  in target_set_nacl_queue_depth()
    162  if (!acl->queue_depth) {  in target_set_nacl_queue_depth()
    166  acl->queue_depth = 1;  in target_set_nacl_queue_depth()
    174  u32 queue_depth;  in target_alloc_node_acl() local
    191  queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);  in target_alloc_node_acl()
    193  queue_depth = 1;  in target_alloc_node_acl()
    194  target_set_nacl_queue_depth(tpg, acl, queue_depth);  in target_alloc_node_acl()
    218  acl->queue_depth,  in target_add_node_acl()
    369  tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,  in core_tpg_del_initiator_node_acl()
    [all …]
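The target_core_tpg.c hits center on one guard: target_set_nacl_queue_depth() falls back to a depth of 1 when the fabric reports 0, so an ACL is never left unable to issue commands. A minimal userspace sketch of that clamp, with struct se_node_acl reduced to the single field used here:

    #include <stdio.h>

    struct se_node_acl { unsigned int queue_depth; };

    /* Mirror of the fallback in target_set_nacl_queue_depth(): a depth
     * of 0 would make the initiator unusable, so clamp it to 1. */
    static void set_nacl_queue_depth(struct se_node_acl *acl, unsigned int depth)
    {
        acl->queue_depth = depth;
        if (!acl->queue_depth)
            acl->queue_depth = 1;
    }

    int main(void)
    {
        struct se_node_acl acl;

        set_nacl_queue_depth(&acl, 0);
        printf("clamped depth: %u\n", acl.queue_depth); /* prints 1 */
        return 0;
    }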
/linux/drivers/s390/block/
dasd_genhd.c
    23  static unsigned int queue_depth = 32;  variable
    26  module_param(queue_depth, uint, 0444);
    27  MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
    58  block->tag_set.queue_depth = queue_depth;  in dasd_gendisk_alloc()
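dasd_genhd.c shows the common pattern of a read-only module parameter (mode 0444) supplying the default blk-mq tag-set depth for every device the driver creates. A kernel-style sketch of the same wiring; it only builds in-tree, and qd_demo_init and hypothetical_tag_set are illustrative names, not DASD code:

    #include <linux/module.h>
    #include <linux/blk-mq.h>

    /* Read-only at runtime: visible in sysfs, settable only at load time. */
    static unsigned int queue_depth = 32;
    module_param(queue_depth, uint, 0444);
    MODULE_PARM_DESC(queue_depth, "Default queue depth for new devices");

    /* Stand-in for the driver's real per-device tag set. */
    static struct blk_mq_tag_set hypothetical_tag_set;

    static int __init qd_demo_init(void)
    {
        /* Every device created by this driver starts at the module default. */
        hypothetical_tag_set.queue_depth = queue_depth;
        pr_info("qd_demo: using queue_depth=%u\n", queue_depth);
        return 0;
    }
    module_init(qd_demo_init);

    MODULE_LICENSE("GPL");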
/linux/arch/um/drivers/
vector_kern.c
    266  atomic_sub(advance, &qi->queue_depth);  in vector_advancehead()
    267  return atomic_read(&qi->queue_depth);  in vector_advancehead()
    280  atomic_add(advance, &qi->queue_depth);  in vector_advancetail()
    281  return atomic_read(&qi->queue_depth);  in vector_advancetail()
    328  int queue_depth;  in vector_enqueue() local
    334  queue_depth = atomic_read(&qi->queue_depth);  in vector_enqueue()
    339  if (queue_depth < qi->max_depth) {  in vector_enqueue()
    354  queue_depth = vector_advancetail(qi, 1);  in vector_enqueue()
    358  return queue_depth;  in vector_enqueue()
    367  return queue_depth;  in vector_enqueue()
    [all …]
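vector_kern.c tracks ring occupancy with an atomic counter: the tail side adds to queue_depth, the head side subtracts, and vector_enqueue() refuses work once max_depth is reached. A reduced single-file model using C11 atomics (the kernel uses atomic_t and carries more queue state than shown):

    #include <stdatomic.h>
    #include <stdio.h>

    struct vector_queue {
        atomic_int queue_depth;  /* entries currently queued */
        int max_depth;
    };

    /* Producer advanced the tail: account for n new entries. */
    static int advancetail(struct vector_queue *qi, int n)
    {
        atomic_fetch_add(&qi->queue_depth, n);
        return atomic_load(&qi->queue_depth);
    }

    /* Consumer advanced the head: n entries drained. */
    static int advancehead(struct vector_queue *qi, int n)
    {
        atomic_fetch_sub(&qi->queue_depth, n);
        return atomic_load(&qi->queue_depth);
    }

    static int enqueue(struct vector_queue *qi)
    {
        if (atomic_load(&qi->queue_depth) < qi->max_depth)
            return advancetail(qi, 1);
        return -1; /* queue full */
    }

    int main(void)
    {
        struct vector_queue qi = { .max_depth = 2 };

        printf("%d\n", enqueue(&qi));  /* 1 */
        printf("%d\n", enqueue(&qi));  /* 2 */
        printf("%d\n", enqueue(&qi));  /* -1: full */
        advancehead(&qi, 1);
        printf("%d\n", enqueue(&qi));  /* 2: room again */
        return 0;
    }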
vector_kern.h
    48  atomic_t queue_depth;  member
/linux/drivers/target/iscsi/
iscsi_target_device.c
    41  sess->cmdsn_window = se_nacl->queue_depth;  in iscsit_determine_maxcmdsn()
    42  atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);  in iscsit_determine_maxcmdsn()
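iscsit_determine_maxcmdsn() sizes the iSCSI command window from the ACL queue depth: MaxCmdSN is advanced queue_depth - 1 past the base CmdSN, so the initiator may keep queue_depth commands outstanding. A worked example with assumed sequence numbers:

    #include <stdio.h>

    /* Model of the window arithmetic: the window spans
     * [exp_cmd_sn, max_cmd_sn], i.e. queue_depth commands. */
    int main(void)
    {
        unsigned int queue_depth = 32;
        unsigned int exp_cmd_sn = 100;         /* next expected CmdSN (assumed) */
        unsigned int max_cmd_sn = exp_cmd_sn;  /* window starts at one command */

        max_cmd_sn += queue_depth - 1;
        printf("window: CmdSN %u..%u (%u commands)\n",
               exp_cmd_sn, max_cmd_sn, max_cmd_sn - exp_cmd_sn + 1);
        return 0;
    }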
/linux/drivers/infiniband/ulp/rtrs/
rtrs-clt.c
    71  size_t max_depth = clt->queue_depth;  in __rtrs_get_permit()
    497  if (WARN_ON(msg_id >= clt_path->queue_depth))  in process_io_rsp()
    551  if (WARN_ON(buf_id >= clt_path->queue_depth))  in rtrs_clt_rkey_rsp_done()
    724  q_size = clt_path->queue_depth;  in post_recv_path()
    1323  for (i = 0; i < clt_path->queue_depth; ++i) {  in fail_all_outstanding_reqs()
    1349  for (i = 0; i < clt_path->queue_depth; ++i) {  in free_path_reqs()
    1365  clt_path->reqs = kcalloc(clt_path->queue_depth,  in alloc_path_reqs()
    1371  for (i = 0; i < clt_path->queue_depth; ++i) {  in alloc_path_reqs()
    1411  clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);  in alloc_permits()
    1416  clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);  in alloc_permits()
    [all …]
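In rtrs-clt.c the queue depth fixes the size of everything per-path: the request array, the permit array, and the permits bitmap, which is why the completion paths can WARN when an incoming id reaches queue_depth. A reduced, single-threaded model of the permit table; the kernel uses an atomic bitmap and can block waiting for a free permit:

    #include <stdio.h>
    #include <stdlib.h>

    struct clt {
        size_t queue_depth;
        unsigned char *permits_map; /* 0 = free, 1 = taken (bitmap in-kernel) */
    };

    /* Every in-flight request holds one permit, so any valid
     * msg_id/buf_id is < queue_depth by construction. */
    static int get_permit(struct clt *clt)
    {
        for (size_t i = 0; i < clt->queue_depth; i++) {
            if (!clt->permits_map[i]) {
                clt->permits_map[i] = 1;
                return (int)i;
            }
        }
        return -1; /* no free permit: caller must wait */
    }

    static void put_permit(struct clt *clt, int id)
    {
        clt->permits_map[id] = 0;
    }

    int main(void)
    {
        struct clt clt = { .queue_depth = 2 };

        clt.permits_map = calloc(clt.queue_depth, 1);
        printf("%d\n", get_permit(&clt)); /* 0 */
        printf("%d\n", get_permit(&clt)); /* 1 */
        printf("%d\n", get_permit(&clt)); /* -1: all permits in use */
        put_permit(&clt, 0);
        printf("%d\n", get_permit(&clt)); /* 0 again */
        free(clt.permits_map);
        return 0;
    }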
rtrs-srv.c
    112  for (i = 0; i < srv->queue_depth; i++)  in rtrs_srv_free_ops_ids()
    141  srv_path->ops_ids = kcalloc(srv->queue_depth,  in rtrs_srv_alloc_ops_ids()
    147  for (i = 0; i < srv->queue_depth; ++i) {  in rtrs_srv_alloc_ops_ids()
    588  mrs_num = srv->queue_depth;  in map_cont_bufs()
    592  mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);  in map_cont_bufs()
    593  chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);  in map_cont_bufs()
    610  srv->queue_depth - chunks);  in map_cont_bufs()
    658  chunk_bits = ilog2(srv->queue_depth - 1) + 1;  in map_cont_bufs()
    995  q_size = srv->queue_depth;  in post_recv_path()
    1263  if (msg_id >= srv->queue_depth || off >= max_chunk_size) {  in rtrs_srv_rdma_done()
    [all …]
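map_cont_bufs() spreads queue_depth chunks across memory regions: first compute how many MRs are needed at chunks_per_mr chunks each, then recompute chunks_per_mr so the chunks divide evenly across those MRs, and finally derive chunk_bits, the number of bits needed to address any chunk id. A worked example; queue_depth = 512 and max_sge = 48 are assumed values, not kernel constants:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Portable stand-in for the kernel's ilog2(). */
    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int queue_depth = 512, max_sge = 48; /* assumed */
        unsigned int chunks_per_mr = max_sge;
        unsigned int mrs_num = DIV_ROUND_UP(queue_depth, chunks_per_mr);

        /* Rebalance so no MR is left nearly empty. */
        chunks_per_mr = DIV_ROUND_UP(queue_depth, mrs_num);
        printf("mrs_num=%u chunks_per_mr=%u chunk_bits=%u\n",
               mrs_num, chunks_per_mr, ilog2_u(queue_depth - 1) + 1);
        /* prints: mrs_num=11 chunks_per_mr=47 chunk_bits=9 */
        return 0;
    }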
rtrs-clt.h
    143  size_t queue_depth;  member
    172  size_t queue_depth;  member
rtrs.h
    111  u32 queue_depth;  member
rtrs-srv.h
    110  size_t queue_depth;  member
/linux/drivers/ata/
libata-sata.c
    1281  int queue_depth)  in ata_change_queue_depth() argument
    1290  if (!dev || queue_depth < 1 || queue_depth == sdev->queue_depth) {  in ata_change_queue_depth()
    1292  return sdev->queue_depth;  in ata_change_queue_depth()
    1301  if (queue_depth > max_queue_depth) {  in ata_change_queue_depth()
    1310  if (queue_depth == 1 || !ata_ncq_supported(dev)) {  in ata_change_queue_depth()
    1312  queue_depth = 1;  in ata_change_queue_depth()
    1319  if (queue_depth == sdev->queue_depth)  in ata_change_queue_depth()
    1320  return sdev->queue_depth;  in ata_change_queue_depth()
    1322  return scsi_change_queue_depth(sdev, queue_depth);  in ata_change_queue_depth()
    1341  int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)  in ata_scsi_change_queue_depth() argument
    [all …]
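ata_change_queue_depth() rejects invalid depths, caps the request at the host limit, forces depth 1 when the device lacks NCQ (a non-NCQ device can only take one command at a time), and skips the update when nothing would change. A reduced userspace model of that decision chain; the kernel version also takes the port lock and updates device flags:

    #include <stdio.h>

    static int change_queue_depth(int *cur_depth, int ncq_supported,
                                  int new_depth, int max_depth)
    {
        if (new_depth < 1)
            return *cur_depth;     /* invalid request: keep current */
        if (new_depth > max_depth)
            new_depth = max_depth; /* cap at the host limit */
        if (new_depth == 1 || !ncq_supported)
            new_depth = 1;         /* non-NCQ: one command at a time */
        if (new_depth == *cur_depth)
            return *cur_depth;     /* nothing to do */
        *cur_depth = new_depth;
        return new_depth;
    }

    int main(void)
    {
        int depth = 31;

        printf("%d\n", change_queue_depth(&depth, 0, 16, 32)); /* 1: no NCQ */
        printf("%d\n", change_queue_depth(&depth, 1, 64, 32)); /* 32: clamped */
        return 0;
    }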
/linux/drivers/gpu/drm/scheduler/tests/
tests_basic.c
    75  unsigned int queue_depth;  member
    84  .queue_depth = 100,
    90  .queue_depth = 100,
    97  .queue_depth = 100,
    103  .queue_depth = 100,
    136  for (i = 0; i < params->queue_depth; i++) {  in drm_sched_basic_test()
/linux/include/net/mana/
hw_channel.h
    148  u16 queue_depth;  member
    162  u16 queue_depth;  member
/linux/block/
blk-rq-qos.c
    124  if (rqd->queue_depth == 1) {  in rq_depth_calc_max_depth()
    140  rqd->queue_depth);  in rq_depth_calc_max_depth()
    144  unsigned int maxd = 3 * rqd->queue_depth / 4;  in rq_depth_calc_max_depth()
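rq_depth_calc_max_depth() special-cases queue_depth == 1 and, when scaling up, caps the throttled depth at three quarters of the device queue depth so writeback can never fully saturate the device. The cap as a one-line calculation; the full kernel algorithm also interpolates by a scale step not shown here:

    #include <stdio.h>

    int main(void)
    {
        unsigned int queue_depth = 64; /* assumed device depth */
        unsigned int maxd = 3 * queue_depth / 4;

        printf("device depth %u -> throttle ceiling %u\n", queue_depth, maxd);
        return 0;
    }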
blk-mq.c
    3899  unsigned int queue_depth, struct request *flush_rq)  in blk_mq_clear_flush_rq_mapping() argument
    3910  for (i = 0; i < queue_depth; i++)  in blk_mq_clear_flush_rq_mapping()
    3935  set->queue_depth, flush_rq);  in blk_mq_exit_hctx()
    4121  set->queue_depth);  in __blk_mq_alloc_map_and_rqs()
    4608  q->nr_requests = set->queue_depth;  in blk_mq_init_allocated_queue()
    4641  set->queue_depth);  in __blk_mq_alloc_rq_maps()
    4676  depth = set->queue_depth;  in blk_mq_alloc_set_map_and_rqs()
    4682  set->queue_depth >>= 1;  in blk_mq_alloc_set_map_and_rqs()
    4683  if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {  in blk_mq_alloc_set_map_and_rqs()
    4687  } while (set->queue_depth);  in blk_mq_alloc_set_map_and_rqs()
    [all …]
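The interesting hits are the fallback loop in blk_mq_alloc_set_map_and_rqs(): if the request maps cannot be allocated at the requested depth, the depth is halved and the allocation retried until it succeeds or can no longer cover the reserved tags. A model of that loop; try_alloc() stands in for __blk_mq_alloc_rq_maps(), and the depth-64 memory limit is invented for the demo:

    #include <stdio.h>
    #include <stdbool.h>

    #define BLK_MQ_TAG_MIN 1 /* kernel's minimum tag count */

    static bool try_alloc(unsigned int depth)
    {
        return depth <= 64; /* pretend memory allows at most depth 64 */
    }

    int main(void)
    {
        unsigned int queue_depth = 256, reserved_tags = 1;

        do {
            if (try_alloc(queue_depth)) {
                printf("allocated at depth %u\n", queue_depth);
                return 0;
            }
            /* Allocation failed: halve and retry, unless the depth
             * can no longer cover the reserved tags. */
            queue_depth >>= 1;
            if (queue_depth < reserved_tags + BLK_MQ_TAG_MIN)
                break;
        } while (queue_depth);

        printf("allocation failed\n");
        return 1;
    }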
/linux/tools/testing/selftests/ublk/
kublk.c
    339  info->dev_id, info->nr_hw_queues, info->queue_depth,  in ublk_ctrl_dump()
    438  int depth = dev->dev_info.queue_depth;  in ublk_queue_init()
    501  unsigned nr_ios = dev->dev_info.queue_depth * dev->dev_info.nr_hw_queues;  in ublk_thread_init()
    674  int nr_ios = dinfo->nr_hw_queues * dinfo->queue_depth;  in ublk_submit_fetch_commands()
    676  int q_id = i / dinfo->queue_depth;  in ublk_submit_fetch_commands()
    677  int tag = i % dinfo->queue_depth;  in ublk_submit_fetch_commands()
    1107  unsigned depth = ctx->queue_depth;  in __cmd_dev_add()
    1172  info->queue_depth = depth;  in __cmd_dev_add()
    1543  .queue_depth = 128,  in main()
    1575  ctx.queue_depth = strtol(optarg, NULL, 10);  in main()
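ublk_submit_fetch_commands() walks one flat index across nr_hw_queues * queue_depth ios and splits it back into a (queue, tag) pair, so every tag of every queue receives exactly one fetch command. The mapping in isolation, with small assumed dimensions:

    #include <stdio.h>

    int main(void)
    {
        int nr_hw_queues = 2, queue_depth = 4; /* assumed */
        int nr_ios = nr_hw_queues * queue_depth;

        for (int i = 0; i < nr_ios; i++) {
            int q_id = i / queue_depth; /* which hardware queue */
            int tag = i % queue_depth;  /* slot within that queue */

            printf("io %d -> queue %d, tag %d\n", i, q_id, tag);
        }
        return 0;
    }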
/linux/drivers/block/rnbd/
rnbd-srv.h
    25  int queue_depth;  member
rnbd-clt.h
    86  size_t queue_depth;  member
/linux/drivers/scsi/snic/
snic_main.c
    91  if (qsz < sdev->queue_depth)  in snic_change_queue_depth()
    93  else if (qsz > sdev->queue_depth)  in snic_change_queue_depth()
    96  atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);  in snic_change_queue_depth()
    100  return sdev->queue_depth;  in snic_change_queue_depth()
/linux/rust/kernel/block/mq/
tag_set.rs
    52  queue_depth: num_tags,  in new()
/linux/drivers/block/
zloop.c
    72  unsigned int queue_depth;  member
    941  opts->nr_queues * opts->queue_depth, zlo->id);  in zloop_ctl_add()
    982  zlo->tag_set.queue_depth = opts->queue_depth;  in zloop_ctl_add()
    1107  opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;  in zloop_parse_options()
    1213  opts->queue_depth = token;  in zloop_parse_options()
/linux/drivers/scsi/
scsi.c
    222  sdev->queue_depth = depth;  in scsi_change_queue_depth()
    229  sbitmap_resize(&sdev->budget_map, sdev->queue_depth);  in scsi_change_queue_depth()
    231  return sdev->queue_depth;  in scsi_change_queue_depth()
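scsi_change_queue_depth() keeps the per-device budget map sized to the queue depth by resizing the sbitmap in the same step that stores the new depth. A reduced model with a plain byte array in place of the sbitmap; unlike the kernel, it does not preserve in-flight state across the resize:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sdev {
        int queue_depth;
        unsigned char *budget_map; /* one slot per allowed in-flight command */
    };

    /* Invariant: the budget map always has exactly queue_depth slots. */
    static int change_queue_depth(struct sdev *sdev, int depth)
    {
        unsigned char *map = realloc(sdev->budget_map, (size_t)depth);

        if (!map)
            return sdev->queue_depth; /* keep the old depth on failure */
        memset(map, 0, (size_t)depth);
        sdev->budget_map = map;
        sdev->queue_depth = depth;
        return sdev->queue_depth;
    }

    int main(void)
    {
        struct sdev sdev = { 0 };

        printf("depth now %d\n", change_queue_depth(&sdev, 32));
        printf("depth now %d\n", change_queue_depth(&sdev, 8));
        free(sdev.budget_map);
        return 0;
    }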
/linux/drivers/net/ethernet/microsoft/mana/
hw_channel.c
    345  comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);  in mana_hwc_comp_event()
    346  WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);  in mana_hwc_comp_event()
    425  hwc_cq->queue_depth = q_depth;  in mana_hwc_create_cq()
    540  hwc_wq->queue_depth = q_depth;  in mana_hwc_create_wq()
/linux/drivers/scsi/bfa/
bfad_im.c
    90  (bfa_lun_queue_depth > cmnd->device->queue_depth)) {  in bfa_cb_ioim_done()
    118  if (bfa_lun_queue_depth > cmnd->device->queue_depth) {  in bfa_cb_ioim_good_comp()
    878  if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {  in bfad_ramp_up_qdepth()
    882  tmp_sdev->queue_depth + 1);  in bfad_ramp_up_qdepth()
    900  scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);  in bfad_handle_qfull()
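bfad_im.c implements the classic ramp-up/back-off pair: good completions nudge the device queue depth up toward bfa_lun_queue_depth (bfad_ramp_up_qdepth), and a QUEUE FULL response steps it back down via scsi_track_queue_full() (bfad_handle_qfull). A reduced model of the two movements; the kernel adjusts through the SCSI midlayer rather than touching the value directly:

    #include <stdio.h>

    /* Good completion while below the LUN limit: grow by one. */
    static void ramp_up(int *qdepth, int lun_limit)
    {
        if (lun_limit > *qdepth)
            (*qdepth)++;
    }

    /* QUEUE FULL from the device: back off by one, never below 1. */
    static void handle_qfull(int *qdepth)
    {
        if (*qdepth > 1)
            (*qdepth)--;
    }

    int main(void)
    {
        int qdepth = 30, lun_limit = 32;

        ramp_up(&qdepth, lun_limit);
        ramp_up(&qdepth, lun_limit);
        ramp_up(&qdepth, lun_limit);              /* capped at the LUN limit */
        printf("after ramp-up: %d\n", qdepth);    /* 32 */
        handle_qfull(&qdepth);
        printf("after QUEUE FULL: %d\n", qdepth); /* 31 */
        return 0;
    }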
/linux/include/target/
target_core_base.h
    580  u32 queue_depth;  member
    728  u32 queue_depth;  member
    826  u32 queue_depth;  member