
Searched refs:max_queues (Results 1 – 25 of 40) sorted by relevance

/linux/block/
blk-mq-cpumap.c
21 unsigned int max_queues) in blk_mq_num_queues() argument
26 return min_not_zero(num, max_queues); in blk_mq_num_queues()
38 unsigned int blk_mq_num_possible_queues(unsigned int max_queues) in blk_mq_num_possible_queues() argument
40 return blk_mq_num_queues(cpu_possible_mask, max_queues); in blk_mq_num_possible_queues()
53 unsigned int blk_mq_num_online_queues(unsigned int max_queues) in blk_mq_num_online_queues() argument
55 return blk_mq_num_queues(cpu_online_mask, max_queues); in blk_mq_num_online_queues()
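The helpers above clamp a driver-supplied limit to the number of possible or online CPUs via min_not_zero(), so a limit of 0 means "no limit". A minimal sketch of how a block driver might use them when sizing its tag set; MY_HW_MAX_QUEUES and mydrv_init_tag_set() are hypothetical names, not part of any in-tree driver.

#include <linux/blk-mq.h>

#define MY_HW_MAX_QUEUES 32	/* hypothetical hardware queue limit */

/* Assumes the rest of *set (ops, queue_depth, numa_node, ...) has already
 * been filled in by the caller. */
static int mydrv_init_tag_set(struct blk_mq_tag_set *set)
{
	/* No point allocating more hardware queues than possible CPUs. */
	set->nr_hw_queues = blk_mq_num_possible_queues(MY_HW_MAX_QUEUES);
	return blk_mq_alloc_tag_set(set);
}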
/linux/drivers/accel/habanalabs/common/
hw_queue.c
53 if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW) in hl_hw_queue_update_ci()
61 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { in hl_hw_queue_update_ci()
646 u32 max_queues; in hl_hw_queue_schedule_cs() local
661 max_queues = hdev->asic_prop.max_queues; in hl_hw_queue_schedule_cs()
664 for (i = 0, cq_cnt = 0 ; i < max_queues ; i++, q++) { in hl_hw_queue_schedule_cs()
795 for (i = 0 ; (i < max_queues) && (cq_cnt > 0) ; i++, q++) { in hl_hw_queue_schedule_cs()
1085 hdev->kernel_queues = kcalloc(asic->max_queues, in hl_hw_queues_create()
1095 i < asic->max_queues ; i++, q_ready_cnt++, q++) { in hl_hw_queues_create()
1129 u32 max_queues = hdev->asic_prop.max_queues; in hl_hw_queues_destroy() local
1132 for (i = 0, q = hdev->kernel_queues ; i < max_queues ; i++, q++) in hl_hw_queues_destroy()
[all …]
command_submission.c
950 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
953 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
1214 if (chunk->queue_index >= asic->max_queues) { in validate_queue_index()
2024 if (q_idx >= hdev->asic_prop.max_queues) { in cs_ioctl_reserve_signals()
2232 if (chunk->queue_index >= hdev->asic_prop.max_queues) { in cs_ioctl_signal_wait()
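The habanalabs hits show the two places max_queues matters: the per-device queue array is allocated with kcalloc(max_queues, ...), and any user-supplied queue index is rejected if it is >= max_queues. A minimal sketch of that allocate-then-bounds-check pattern; struct my_queue, my_queues_create() and my_queue_index_valid() are illustrative names, not the driver's API.

#include <linux/slab.h>
#include <linux/types.h>

struct my_queue {
	u32 ci;		/* consumer index, illustrative */
};

/* Allocate one descriptor per possible hardware queue. */
static struct my_queue *my_queues_create(u32 max_queues)
{
	return kcalloc(max_queues, sizeof(struct my_queue), GFP_KERNEL);
}

/* Reject any queue index from user space that falls outside the array. */
static bool my_queue_index_valid(u32 queue_index, u32 max_queues)
{
	return queue_index < max_queues;
}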
/linux/arch/mips/cavium-octeon/executive/
cvmx-pko.c
215 int max_queues = in cvmx_pko_initialize_global() local
219 if (max_queues <= 32) in cvmx_pko_initialize_global()
221 else if (max_queues <= 64) in cvmx_pko_initialize_global()
224 if (max_queues <= 64) in cvmx_pko_initialize_global()
226 else if (max_queues <= 128) in cvmx_pko_initialize_global()
/linux/drivers/net/ethernet/google/gve/
gve_main.c
509 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
511 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
515 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
516 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
517 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
518 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
519 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
520 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
577 if (cur_cpu >= nr_cpu_ids || (i + 1) == priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
1736 (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) { in verify_xdp_configuration()
[all …]
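gve_alloc_notify_blocks() lowers max_queues to match the MSI-X vectors actually granted and then pulls num_queues down under the new limit. A minimal sketch of that clamp with illustrative names rather than the gve structures; the "reserve one vector for management" detail is an assumption for the example, not taken from the hits above.

#include <linux/minmax.h>
#include <linux/types.h>

struct my_queue_config {
	u32 max_queues;		/* upper bound advertised by the device */
	u32 num_queues;		/* currently configured count */
};

static void my_clamp_queues(struct my_queue_config *cfg, u32 vecs_enabled)
{
	/* Assumed split: one vector for management, the rest for queues. */
	cfg->max_queues = min_t(u32, cfg->max_queues, vecs_enabled - 1);
	if (cfg->num_queues > cfg->max_queues)
		cfg->num_queues = cfg->max_queues;
}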
gve_ethtool.c
467 cmd->max_rx = priv->rx_cfg.max_queues; in gve_get_channels()
468 cmd->max_tx = priv->tx_cfg.max_queues; in gve_get_channels()
499 (2 * new_tx > priv->tx_cfg.max_queues)) { in gve_set_channels()
gve_tx.c
342 if (total_queues > cfg->qcfg->max_queues) { in gve_tx_alloc_rings_gqi()
348 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_gqi()
/linux/drivers/net/ethernet/samsung/sxgbe/
sxgbe_common.h
194 #define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num) \ argument
195 for (queue_num = 0; queue_num < max_queues; queue_num++)
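SXGBE_FOR_EACH_QUEUE is just a bounded for loop over queue indices. A short usage sketch; tx_queue_count and my_start_tx_queue() are illustrative names, not sxgbe driver symbols.

/* Expands to: for (queue_num = 0; queue_num < tx_queue_count; queue_num++) */
static void my_start_all_tx_queues(int tx_queue_count)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(tx_queue_count, queue_num)
		my_start_tx_queue(queue_num);
}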
/linux/drivers/net/ethernet/intel/fm10k/
fm10k_ethtool.c
171 for (i = 0; i < interface->hw.mac.max_queues; i++) { in fm10k_get_stat_strings()
208 stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN; in fm10k_get_sset_count()
283 for (i = 0; i < interface->hw.mac.max_queues; i++) { in fm10k_get_ethtool_stats()
412 if (i < hw->mac.max_queues) in fm10k_get_regs()
1097 unsigned int max_combined = interface->hw.mac.max_queues; in fm10k_max_channels()
fm10k_common.c
241 return fm10k_disable_queues_generic(hw, hw->mac.max_queues); in fm10k_stop_hw_generic()
fm10k_pf.c
175 hw->mac.max_queues = FM10K_MAX_QUEUES_PF; in fm10k_init_hw_pf()
1367 fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues); in fm10k_update_hw_stats_pf()
1392 fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues); in fm10k_rebind_hw_stats_pf()
fm10k_type.h
546 u16 max_queues; member
fm10k_main.c
1496 rss_i = interface->hw.mac.max_queues / pcs; in fm10k_set_qos_queues()
1529 rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit); in fm10k_set_rss_queues()
/linux/drivers/net/ethernet/huawei/hinic/
hinic_hw_qp.c
74 u16 num_queues, u16 max_queues) in hinic_qp_prepare_header() argument
76 u16 max_sqs = max_queues; in hinic_qp_prepare_header()
77 u16 max_rqs = max_queues; in hinic_qp_prepare_header()
hinic_hw_qp.h
130 u16 num_queues, u16 max_queues);
/linux/drivers/net/ethernet/amazon/ena/
ena_netdev.c
2925 struct ena_admin_queue_feature_desc *max_queues = in ena_calc_io_queue_size() local
2926 &get_feat_ctx->max_queues; in ena_calc_io_queue_size()
2927 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, in ena_calc_io_queue_size()
2928 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2929 max_tx_queue_size = max_queues->max_cq_depth; in ena_calc_io_queue_size()
2936 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2939 max_queues->max_packet_tx_descs); in ena_calc_io_queue_size()
2941 max_queues->max_packet_rx_descs); in ena_calc_io_queue_size()
3731 struct ena_admin_queue_feature_desc *max_queues = in ena_calc_max_io_queue_num() local
3732 &get_feat_ctx->max_queues; in ena_calc_max_io_queue_num()
[all …]
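ena_calc_io_queue_size() bounds the usable queue depth by both the completion-queue and submission-queue depths the device advertises. A minimal sketch of that sizing step; the struct and function names are illustrative, not the ena driver's API.

#include <linux/minmax.h>
#include <linux/types.h>

struct my_queue_caps {
	u32 max_cq_depth;
	u32 max_sq_depth;
};

/* An I/O queue can be no deeper than the smaller of the two device limits. */
static u32 my_calc_io_queue_size(const struct my_queue_caps *caps)
{
	return min_t(u32, caps->max_cq_depth, caps->max_sq_depth);
}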
ena_com.h
394 struct ena_admin_queue_feature_desc max_queues; member
/linux/include/linux/
blk-mq.h
950 unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
951 unsigned int blk_mq_num_online_queues(unsigned int max_queues);
/linux/drivers/net/ethernet/microsoft/mana/
mana_en.c
2899 u32 max_txq, max_rxq, max_queues; in mana_init_port() local
2919 max_queues = min_t(u32, max_txq, max_rxq); in mana_init_port()
2920 if (apc->max_queues > max_queues) in mana_init_port()
2921 apc->max_queues = max_queues; in mana_init_port()
2923 if (apc->num_queues > apc->max_queues) in mana_init_port()
2924 apc->num_queues = apc->max_queues; in mana_init_port()
3148 apc->max_queues = gc->max_num_queues; in mana_probe_port()
mana_ethtool.c
387 channel->max_combined = apc->max_queues; in mana_get_channels()
/linux/drivers/net/ethernet/cavium/thunder/
nicvf_main.c
1850 if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) { in nicvf_xdp_setup()
1853 nic->max_queues); in nicvf_xdp_setup()
2145 nic->max_queues = qcount; in nicvf_probe()
2150 nic->max_queues *= 2; in nicvf_probe()
2216 nic->rx_queues + nic->tx_queues <= nic->max_queues) in nicvf_probe()
nic.h
310 u8 max_queues; member
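The nicvf hits show the probe path doubling max_queues (presumably to leave headroom for XDP TX queues) and nicvf_xdp_setup() refusing an XDP program when the combined RX + TX count would exceed that limit. A minimal sketch of the fit check only; the helper name is illustrative, not nicvf's.

#include <linux/types.h>

/* Refuse an XDP attach if the combined queue count no longer fits under
 * the per-device maximum. */
static bool my_xdp_queues_fit(u32 rx_queues, u32 tx_queues, u32 max_queues)
{
	return rx_queues + tx_queues <= max_queues;
}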
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_vf.c
548 num_vec += NIX_LF_CINT_VEC_START + hw->max_queues; in otx2vf_realloc_msix_vectors()
610 hw->max_queues = qcount; in otx2vf_probe()
/linux/drivers/net/
xen-netfront.c
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
2265 unsigned int max_queues = 0; in talk_to_netback() local
2277 max_queues = xenbus_read_unsigned(info->xbdev->otherend, in talk_to_netback()
2279 num_queues = min(max_queues, xennet_max_queues); in talk_to_netback()
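xen-netfront exposes its own limit as the max_queues module parameter and, in talk_to_netback(), takes the smaller of that value and the backend's advertised "multi-queue-max-queues" xenbus key. A minimal sketch of that negotiation; the wrapper function is illustrative, and the fallback of one queue when the key is absent is an assumption for the example.

#include <linux/minmax.h>
#include <xen/xenbus.h>

static unsigned int my_negotiate_num_queues(struct xenbus_device *xbdev,
					    unsigned int frontend_max)
{
	unsigned int backend_max;

	/* Assume a single queue if the backend does not advertise the key. */
	backend_max = xenbus_read_unsigned(xbdev->otherend,
					   "multi-queue-max-queues", 1);
	return min(backend_max, frontend_max);
}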
/linux/include/net/mana/
mana.h
517 unsigned int max_queues; member
