/linux-5.10/drivers/net/ethernet/intel/ixgbe/

ixgbe_lib.c
    48   for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {  in ixgbe_cache_ring_dcb_sriov()
    78   for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {  in ixgbe_cache_ring_dcb_sriov()
    223  for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {  in ixgbe_cache_ring_sriov()
    237  for (; i < adapter->num_tx_queues; i++, reg_idx++)  in ixgbe_cache_ring_sriov()
    260  for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)  in ixgbe_cache_ring_rss()
    380  adapter->num_tx_queues = vmdq_i * tcs;  in ixgbe_set_dcb_sriov_queues()
    399  adapter->num_tx_queues += fcoe_i;  in ixgbe_set_dcb_sriov_queues()
    436  rss_i = dev->num_tx_queues / tcs;  in ixgbe_set_dcb_queues()
    478  adapter->num_tx_queues = rss_i * tcs;  in ixgbe_set_dcb_queues()
    551  adapter->num_tx_queues = vmdq_i * rss_i;  in ixgbe_set_sriov_queues()
    [all …]
/linux-5.10/net/sched/

sch_mq.c
    62   for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)  in mq_destroy()
    83   priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mq_init()
    88   for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_init()
    113  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_attach()
    144  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mq_dump()
    179  if (ntx >= dev->num_tx_queues)  in mq_queue_get()
    265  for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {  in mq_walk()
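The sch_mq.c hits above, and the sch_mqprio.c and sch_taprio.c hits that follow, share one pattern: a per-queue array sized by dev->num_tx_queues is allocated once, then each hardware TX queue gets its own child object. A minimal sketch of that pattern (not the kernel's verbatim code; the slots array and function name are illustrative):

#include <linux/netdevice.h>
#include <linux/slab.h>

/* Allocate one slot per registered TX queue.  dev->num_tx_queues is fixed
 * when the netdev is allocated (alloc_etherdev_mq() and friends), so the
 * array size cannot change underneath us. */
static void **alloc_per_txq_slots(struct net_device *dev)
{
	void **slots;
	unsigned int ntx;

	slots = kcalloc(dev->num_tx_queues, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return NULL;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, ntx);

		/* mq_init()/mqprio_init()/taprio_init() create one child
		 * qdisc per txq here; this sketch only stores the pointer. */
		slots[ntx] = txq;
	}

	return slots;
}
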
sch_mqprio.c
    38   ntx < dev->num_tx_queues && priv->qdiscs[ntx];  in mqprio_destroy()
    157  if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)  in mqprio_init()
    221  priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),  in mqprio_init()
    226  for (i = 0; i < dev->num_tx_queues; i++) {  in mqprio_init()
    297  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_attach()
    315  if (ntx >= dev->num_tx_queues)  in mqprio_queue_get()
    401  for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {  in mqprio_dump()
    479  return (ntx <= dev->num_tx_queues) ? ntx : 0;  in mqprio_find()
    594  for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {  in mqprio_walk()
sch_taprio.c
    458   for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_peek_soft()
    492   for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_peek_offload()
    543   for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_dequeue_soft()
    617   for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_dequeue_offload()
    934   if (qopt->num_tc > dev->num_tx_queues) {  in taprio_parse_mqprio_opt()
    953   if (qopt->offset[i] >= dev->num_tx_queues ||  in taprio_parse_mqprio_opt()
    1614  for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)  in taprio_destroy()
    1664  q->qdiscs = kcalloc(dev->num_tx_queues,  in taprio_init()
    1674  for (i = 0; i < dev->num_tx_queues; i++) {  in taprio_init()
    1702  if (ntx >= dev->num_tx_queues)  in taprio_queue_get()
    [all …]
/linux-5.10/drivers/net/ethernet/intel/igc/
igc_tsn.c
    11  for (i = 0; i < adapter->num_tx_queues; i++) {  in is_any_launchtime()
    43  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_disable_offload()
    88  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_tsn_enable_offload()
igc_dump.c
    135  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()
    166  for (n = 0; n < adapter->num_tx_queues; n++) {  in igc_rings_dump()
igc_main.c
    260   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_free_all_tx_resources()
    272   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_clean_all_tx_rings()
    326   for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_setup_all_tx_resources()
    627   for (i = 0; i < adapter->num_tx_queues; i++)  in igc_configure_tx()
    1397  if (r_idx >= adapter->num_tx_queues)  in igc_tx_queue_mapping()
    1398  r_idx = r_idx % adapter->num_tx_queues;  in igc_tx_queue_mapping()
    2926  adapter->num_tx_queues = 0;  in igc_free_q_vectors()
    3104  adapter->num_tx_queues = adapter->rss_queues;  in igc_set_interrupt_capability()
    3111  numvecs += adapter->num_tx_queues;  in igc_set_interrupt_capability()
    3147  adapter->num_tx_queues = 1;  in igc_set_interrupt_capability()
    [all …]
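The igc_tx_queue_mapping() hits (lines 1397-1398), like the fm10k_xmit_frame() hits further down, show the same defensive step on the transmit path: the skb's stack-assigned queue index is folded back into the range of rings the driver actually allocated. A minimal sketch, with the function name chosen here only for illustration:

#include <linux/skbuff.h>

/* Clamp an skb's queue_mapping to the number of TX rings the driver really
 * has; the mapping can legitimately exceed it after queues are reconfigured. */
static unsigned int pick_tx_ring(const struct sk_buff *skb,
				 unsigned int num_tx_queues)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;

	return r_idx;
}
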
igc_ethtool.c
    116  (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
    592  for (i = 0; i < adapter->num_tx_queues; i++)  in igc_ethtool_set_ringparam()
    601  if (adapter->num_tx_queues > adapter->num_rx_queues)  in igc_ethtool_set_ringparam()
    603  adapter->num_tx_queues));  in igc_ethtool_set_ringparam()
    620  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_ethtool_set_ringparam()
    635  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_ethtool_set_ringparam()
    760  for (i = 0; i < adapter->num_tx_queues; i++) {  in igc_ethtool_get_strings()
    826  for (j = 0; j < adapter->num_tx_queues; j++) {  in igc_ethtool_get_stats()
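The macro at igc_ethtool.c:116, like the bcmsysport, vmxnet3 and ixgbevf ethtool hits below, sizes the per-queue statistics block by multiplying num_tx_queues by a fixed per-queue stat count. A sketch of how a .get_sset_count callback typically derives that total; the constants here are hypothetical, not any driver's real values:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_GLOBAL_STATS		10	/* hypothetical */
#define EXAMPLE_TX_STATS_PER_QUEUE	3	/* hypothetical */

/* One block of per-queue counters per TX queue, plus the global counters. */
static int example_get_sset_count(struct net_device *dev, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return EXAMPLE_GLOBAL_STATS +
	       dev->num_tx_queues * EXAMPLE_TX_STATS_PER_QUEUE;
}
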
/linux-5.10/drivers/net/ethernet/broadcom/

bcmsysport.c
    358   return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_sset_count()
    385   for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_strings()
    456   for (q = 0; q < priv->netdev->num_tx_queues; q++) {  in bcm_sysport_update_tx_stats()
    520   dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;  in bcm_sysport_get_stats()
    522   for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_get_stats()
    650   for (i = 0; i < dev->num_tx_queues; i++)  in bcm_sysport_set_coalesce()
    1006  for (q = 0; q < priv->netdev->num_tx_queues; q++)  in bcm_sysport_tx_reclaim_all()
    1156  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_rx_isr()
    1189  for (ring = 0; ring < dev->num_tx_queues; ring++) {  in bcm_sysport_tx_isr()
    2011  for (i = 0; i < dev->num_tx_queues; i++) {  in bcm_sysport_open()
    [all …]
/linux-5.10/drivers/net/

ifb.c
    135  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_stats64()
    162  txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);  in ifb_dev_init()
    166  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_dev_init()
    201  for (i = 0; i < dev->num_tx_queues; i++,txp++) {  in ifb_dev_free()
/linux-5.10/drivers/net/ethernet/intel/fm10k/

fm10k_netdev.c
    59    for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_setup_all_tx_resources()
    226   for (i = 0; i < interface->num_tx_queues; i++)  in fm10k_clean_all_tx_rings()
    238   int i = interface->num_tx_queues;  in fm10k_free_all_tx_resources()
    462   interface->num_tx_queues);  in fm10k_open()
    513   int num_tx_queues = READ_ONCE(interface->num_tx_queues);  in fm10k_xmit_frame() local
    517   if (!num_tx_queues)  in fm10k_xmit_frame()
    572   if (r_idx >= num_tx_queues)  in fm10k_xmit_frame()
    573   r_idx %= num_tx_queues;  in fm10k_xmit_frame()
    591   if (txqueue >= interface->num_tx_queues) {  in fm10k_tx_timeout()
    1241  for (i = 0; i < interface->num_tx_queues; i++) {  in fm10k_get_stats64()
fm10k_main.c
    1508  interface->num_tx_queues = rss_i * pcs;  in fm10k_set_qos_queues()
    1534  interface->num_tx_queues = rss_i;  in fm10k_set_rss_queues()
    1569  interface->num_tx_queues = 0;  in fm10k_reset_num_queues()
    1717  unsigned int txr_remaining = interface->num_tx_queues;  in fm10k_alloc_q_vectors()
    1812  v_budget = max(interface->num_rx_queues, interface->num_tx_queues);  in fm10k_init_msix_capability()
    1899  for (i = 0; i < interface->num_tx_queues; i++)  in fm10k_cache_ring_rss()
/linux-5.10/drivers/net/vmxnet3/

vmxnet3_drv.c
    156   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_check_link()
    165   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_check_link()
    193   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_process_events()
    469   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_destroy_all()
    555   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_cleanup_all()
    815   for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_tq_init_all()
    1220  BUG_ON(skb->queue_mapping > adapter->num_tx_queues);  in vmxnet3_xmit_frame()
    1939  for (i = 0; i < adapter->num_tx_queues; i++)  in vmxnet3_do_poll()
    2016  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_msix_tx()
    2141  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_request_irqs()
    [all …]
vmxnet3_ethtool.c
    135  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_stats64()
    173  adapter->num_tx_queues +  in vmxnet3_get_sset_count()
    197  (1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +  in vmxnet3_get_regs_len()
    224  for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_strings()
    391  for (j = 0; j < adapter->num_tx_queues; j++) {  in vmxnet3_get_ethtool_stats()
    458  buf[j++] = adapter->num_tx_queues;  in vmxnet3_get_regs()
    459  for (i = 0; i < adapter->num_tx_queues; i++) {  in vmxnet3_get_regs()
/linux-5.10/drivers/crypto/caam/

dpseci.h
    53  u8 num_tx_queues;  member
    77  u8 num_tx_queues;  member
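In the DPAA2 CAAM (dpseci) interface the queue counts are plain u8 members of the attribute structures, copied out of the firmware response by dpseci_get_attributes() (dpseci.c:178 below). A stripped-down sketch of that shape, not the real layout from dpseci.h:

#include <linux/types.h>

/* Illustrative only: the DPSECI object reports how many request (TX) and
 * response (RX) frame queues it owns, and the driver caches the counts. */
struct example_dpseci_attr {
	u8 num_tx_queues;
	u8 num_rx_queues;
};
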
dpseci_cmd.h
    71  u8 num_tx_queues;  member
dpseci.c
    178  attr->num_tx_queues = rsp_params->num_tx_queues;  in dpseci_get_attributes()
/linux-5.10/drivers/net/ethernet/intel/ixgbevf/

ethtool.c
    63   (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
    266  for (i = 0; i < adapter->num_tx_queues; i++)  in ixgbevf_set_ringparam()
    280  adapter->num_tx_queues +  in ixgbevf_set_ringparam()
    287  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_set_ringparam()
    361  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_set_ringparam()
    396  i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)  in ixgbevf_set_ringparam()
    452  for (j = 0; j < adapter->num_tx_queues; j++) {  in ixgbevf_get_ethtool_stats()
    522  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_get_strings()
ixgbevf_main.c
    1764  for (i = 0; i < adapter->num_tx_queues; i++)  in ixgbevf_configure_tx()
    2182  unsigned int num_tx_queues = adapter->num_tx_queues;  in ixgbevf_configure_dcb() local
    2197  num_tx_queues = 1;  in ixgbevf_configure_dcb()
    2208  (adapter->num_tx_queues != num_tx_queues)) {  in ixgbevf_configure_dcb()
    2467  for (i = 0; i < adapter->num_tx_queues; i++)  in ixgbevf_clean_all_tx_rings()
    2502  for (i = 0; i < adapter->num_tx_queues; i++) {  in ixgbevf_down()
    2611  adapter->num_tx_queues = 1;  in ixgbevf_set_num_queues()
    2640  adapter->num_tx_queues = rss;  in ixgbevf_set_num_queues()
    2665  v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);  in ixgbevf_set_interrupt_capability()
    2865  int txr_remaining = adapter->num_tx_queues;  in ixgbevf_alloc_q_vectors()
    [all …]
/linux-5.10/drivers/infiniband/hw/hfi1/

ipoib_tx.c
    703  priv->tx_napis = kcalloc_node(dev->num_tx_queues,  in hfi1_ipoib_txreq_init()
    710  priv->txqs = kcalloc_node(dev->num_tx_queues,  in hfi1_ipoib_txreq_init()
    717  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_init()
    812  for (i = 0; i < priv->netdev->num_tx_queues; i++) {  in hfi1_ipoib_txreq_deinit()
    838  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_enable()
    850  for (i = 0; i < dev->num_tx_queues; i++) {  in hfi1_ipoib_napi_tx_disable()
/linux-5.10/drivers/net/dsa/ocelot/

felix.h
    23  int num_tx_queues;  member
/linux-5.10/drivers/net/ethernet/freescale/

gianfar.c
    133   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_tx_rx_base()
    250   for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {  in gfar_configure_coalescing()
    298   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_get_stats()
    401   for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_alloc_tx_queues()
    435   for (i = 0; i < priv->num_tx_queues; i++)  in gfar_free_tx_queues()
    567   for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {  in gfar_parse_group()
    570   grp->num_tx_queues++;  in gfar_parse_group()
    720   priv->num_tx_queues = num_tx_qs;  in gfar_of_init()
    1160  for (i = 0; i < priv->num_tx_queues; i++) {  in free_skb_resources()
    1321  for (i = 0; i < priv->num_tx_queues; i++) {  in gfar_init_bds()
    [all …]
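gianfar additionally walks only the TX queues that belong to a given interrupt group, using a bitmap bounded by priv->num_tx_queues (lines 250 and 567 above). A small sketch of that bitmap walk; the function name and the pr_info() body are illustrative:

#include <linux/bitops.h>
#include <linux/printk.h>

/* Visit only the TX queues whose bit is set in the group's mask; the scan
 * never runs past the number of TX queues the device actually has. */
static void visit_grouped_tx_queues(unsigned long tx_bit_map,
				    unsigned int num_tx_queues)
{
	unsigned int i;

	for_each_set_bit(i, &tx_bit_map, num_tx_queues)
		pr_info("TX queue %u is handled by this group\n", i);
}
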
/linux-5.10/arch/powerpc/boot/dts/fsl/

pq3-etsec2-2.dtsi
    49  fsl,num_tx_queues = <0x8>;
pq3-etsec2-0.dtsi
    50  fsl,num_tx_queues = <0x8>;
pq3-etsec2-1.dtsi
    50  fsl,num_tx_queues = <0x8>;
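These eTSEC device-tree fragments expose the queue count as the fsl,num_tx_queues property (eight queues here). A sketch of how a platform driver could read such a property during probe, assuming np is the MAC's device_node; this is not claimed to be gianfar's actual probe code:

#include <linux/of.h>

/* Read the advertised TX queue count from the device tree, falling back to
 * a single queue when the property is absent or malformed. */
static u32 example_read_num_tx_queues(struct device_node *np)
{
	u32 num_tx_qs;

	if (of_property_read_u32(np, "fsl,num_tx_queues", &num_tx_qs))
		num_tx_qs = 1;

	return num_tx_qs;
}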