Identifier search: q_idx (Linux source tree)

/linux/tools/sched_ext/

scx_pair.bpf.c
     248: u32 *q_idx;  [in BPF_STRUCT_OPS(), local]
     258: q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid);  [in BPF_STRUCT_OPS()]
     259: if (!q_idx) {  [in BPF_STRUCT_OPS()]
     264: cgq = bpf_map_lookup_elem(&cgrp_q_arr, q_idx);  [in BPF_STRUCT_OPS()]
     267: cgid, *q_idx);  [in BPF_STRUCT_OPS()]
     277: cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]);  [in BPF_STRUCT_OPS()]
     321: s32 pid, q_idx;  [in try_dispatch(), local]
     380: u32 *q_idx;  [in try_dispatch(), local]
     389: q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &new_cgid);  [in try_dispatch()]
     390: if (!q_idx)  [in try_dispatch()]
     [all …]
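
The scx_pair hits above all orbit one BPF idiom: bpf_map_lookup_elem() returns a pointer into the map value, or NULL when the key is absent, and the verifier refuses any dereference that is not guarded by a NULL check. A minimal sketch of that pattern, using an illustrative hash map and tracepoint hook rather than the real scx_pair scheduler ops:

    /* Hedged sketch of the lookup-then-check pattern seen in
     * scx_pair.bpf.c. The map shape and the tracepoint hook are
     * illustrative; only the NULL-check discipline is the point. */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u64);     /* cgroup id */
            __type(value, __u32);   /* queue index */
    } cgrp_q_idx_hash SEC(".maps");

    SEC("tp/sched/sched_switch")
    int lookup_queue(void *ctx)
    {
            __u64 cgid = bpf_get_current_cgroup_id();
            __u32 *q_idx;

            q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid);
            if (!q_idx)             /* mandatory: the verifier enforces this */
                    return 0;       /* no queue assigned to this cgroup */

            bpf_printk("cgroup %llu -> queue %u", cgid, *q_idx);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";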

/linux/drivers/net/ethernet/intel/ice/

ice_base.c
     754: int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)  [in ice_vsi_cfg_single_rxq(), argument]
     756: if (q_idx >= vsi->num_rxq)  [in ice_vsi_cfg_single_rxq()]
     759: return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);  [in ice_vsi_cfg_single_rxq()]
    1133: u16 q_idx)  [in ice_vsi_cfg_single_txq(), argument]
    1137: if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])  [in ice_vsi_cfg_single_txq()]
    1142: return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);  [in ice_vsi_cfg_single_txq()]
    1159: u16 q_idx;  [in ice_vsi_cfg_txqs(), local]
    1163: for (q_idx = 0; q_idx < count; q_idx++) {  [in ice_vsi_cfg_txqs()]
    1164: err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);  [in ice_vsi_cfg_txqs()]
    1394: static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)  [in ice_qp_reset_stats(), argument]
    [all …]
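
Both single-queue helpers above validate the caller-supplied index before the ring array is touched. A reduced sketch of that guard, with the VSI cut down to the two fields the check needs (assumed layout, not the real struct ice_vsi):

    #include <errno.h>

    struct rx_ring;                      /* opaque here */

    struct vsi {
            unsigned short num_rxq;      /* number of usable Rx queues */
            struct rx_ring **rx_rings;   /* num_rxq ring pointers */
    };

    static int cfg_rxq(struct rx_ring *ring)   /* stand-in for ice_vsi_cfg_rxq() */
    {
            (void)ring;
            return 0;
    }

    /* Mirrors ice_vsi_cfg_single_rxq(): reject an out-of-range index
     * before it can be used to index rx_rings[]. */
    int vsi_cfg_single_rxq(struct vsi *vsi, unsigned short q_idx)
    {
            if (q_idx >= vsi->num_rxq)
                    return -EINVAL;

            return cfg_rxq(vsi->rx_rings[q_idx]);
    }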

ice_base.h
      9: int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
     19: u16 q_idx);
     35: int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
     36: int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);

ice_lib.c
    1987: u16 q_idx;  [in ice_vsi_stop_tx_rings(), local]
    1992: for (q_idx = 0; q_idx < count; q_idx++) {  [in ice_vsi_stop_tx_rings()]
    1996: if (!rings || !rings[q_idx])  [in ice_vsi_stop_tx_rings()]
    1999: ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);  [in ice_vsi_stop_tx_rings()]
    2001: rings[q_idx], &txq_meta);  [in ice_vsi_stop_tx_rings()]
    2786: int q_idx, v_idx;  [in ice_vsi_set_napi_queues(), local]
    2792: ice_for_each_rxq(vsi, q_idx)  [in ice_vsi_set_napi_queues()]
    2793: if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector)  [in ice_vsi_set_napi_queues()]
    2794: netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,  [in ice_vsi_set_napi_queues()]
    2795: &vsi->rx_rings[q_idx]->q_vector->napi);  [in ice_vsi_set_napi_queues()]
    [all …]
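
ice_vsi_stop_tx_rings() shows the companion habit: even inside the loop bound, the array and each slot are re-checked before use, since a ring can be absent mid-teardown. A standalone sketch with the hardware-disable step reduced to a stub:

    #include <errno.h>
    #include <stdio.h>

    struct tx_ring { int id; };

    static void stop_ring(struct tx_ring *r)
    {
            printf("stopping txq %d\n", r->id);   /* stand-in for the HW disable */
    }

    int stop_tx_rings(struct tx_ring **rings, unsigned int count)
    {
            unsigned int q_idx;

            for (q_idx = 0; q_idx < count; q_idx++) {
                    /* A slot may legitimately be NULL during teardown;
                     * bail out rather than dereference it. */
                    if (!rings || !rings[q_idx])
                            return -EINVAL;
                    stop_ring(rings[q_idx]);
            }
            return 0;
    }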

/linux/drivers/infiniband/hw/hfi1/

vnic_main.c
     124: u8 q_idx, struct sk_buff *skb, int err)  [in hfi1_vnic_update_tx_counters(), argument]
     127: struct opa_vnic_stats *stats = &vinfo->stats[q_idx];  [in hfi1_vnic_update_tx_counters()]
     153: u8 q_idx, struct sk_buff *skb, int err)  [in hfi1_vnic_update_rx_counters(), argument]
     156: struct opa_vnic_stats *stats = &vinfo->stats[q_idx];  [in hfi1_vnic_update_rx_counters()]
     205: u8 q_idx)  [in hfi1_vnic_maybe_stop_tx(), argument]
     207: netif_stop_subqueue(vinfo->netdev, q_idx);  [in hfi1_vnic_maybe_stop_tx()]
     208: if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))  [in hfi1_vnic_maybe_stop_tx()]
     211: netif_start_subqueue(vinfo->netdev, q_idx);  [in hfi1_vnic_maybe_stop_tx()]
     218: u8 pad_len, q_idx = skb->queue_mapping;  [in hfi1_netdev_start_xmit(), local]
     225: v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);  [in hfi1_netdev_start_xmit()]
     [all …]
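
hfi1_vnic_maybe_stop_tx() (lines 205-211 above) is the classic stop-then-recheck flow-control shape: the subqueue is stopped first and the descriptor test runs second, which closes the race with a completion that frees descriptors in between; if space raced in, the function undoes its own stop. A toy sketch with the netdev calls replaced by per-queue flags:

    #include <stdbool.h>

    struct vnic {                        /* toy per-queue state, 8 queues */
            bool stopped[8];
            int  free_descs[8];
    };

    /* Stand-ins for netif_stop_subqueue()/netif_start_subqueue() and the
     * driver's hfi1_vnic_sdma_write_avail() test. */
    static void stop_subqueue(struct vnic *v, unsigned int q)  { v->stopped[q] = true; }
    static void start_subqueue(struct vnic *v, unsigned int q) { v->stopped[q] = false; }
    static bool sdma_write_avail(const struct vnic *v, unsigned int q)
    {
            return v->free_descs[q] > 0;
    }

    void maybe_stop_tx(struct vnic *v, unsigned int q_idx)
    {
            stop_subqueue(v, q_idx);             /* pessimistically stop first */
            if (!sdma_write_avail(v, q_idx))
                    return;                      /* stay stopped; wakeup restarts us */
            start_subqueue(v, q_idx);            /* space raced in: undo the stop */
    }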

vnic_sdma.c
     126: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,  [in hfi1_vnic_send_dma(), argument]
     130: struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];  [in hfi1_vnic_send_dma()]
     224: if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))  [in hfi1_vnic_sdma_wakeup()]
     225: netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);  [in hfi1_vnic_sdma_wakeup()]
     229: u8 q_idx)  [in hfi1_vnic_sdma_write_avail(), argument]
     231: struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];  [in hfi1_vnic_sdma_write_avail()]
     249: vnic_sdma->q_idx = i;  [in hfi1_vnic_sdma_init()]

vnic.h
      49: u8 q_idx;  [member]
     113: u8 q_idx);
     122: int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,

ipoib_tx.c
      57: netif_stop_subqueue(txq->priv->netdev, txq->q_idx);  [in hfi1_ipoib_stop_txq()]
      64: netif_wake_subqueue(txq->priv->netdev, txq->q_idx);  [in hfi1_ipoib_wake_txq()]
     125: le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,  [in hfi1_ipoib_free_tx()]
     723: txq->q_idx = i;  [in hfi1_ipoib_txreq_init()]
     789: txq->q_idx,  [in hfi1_ipoib_drain_tx_list()]
     848: __netif_subqueue_stopped(dev, txq->q_idx),  [in hfi1_ipoib_tx_timeout()]

/linux/drivers/accel/habanalabs/common/

hw_queue.c
     420: u32 q_idx;  [in init_signal_cs(), local]
     423: q_idx = job->hw_queue_id;  [in init_signal_cs()]
     424: prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  [in init_signal_cs()]
     432: cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx,  [in init_signal_cs()]
     441: rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,  [in init_signal_cs()]
     480: u32 q_idx;  [in init_wait_cs(), local]
     482: q_idx = job->hw_queue_id;  [in init_wait_cs()]
     483: prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  [in init_wait_cs()]
     497: cs->encaps_sig_hdl->q_idx,  [in init_wait_cs()]
     533: prop->base_mon_id, q_idx, cs->sequence);  [in init_wait_cs()]
     [all …]

command_submission.c
     156: hw_sob->q_idx, hw_sob->sob_id);  [in hl_sob_reset_error()]
    1798: int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,  [in hl_cs_signal_sob_wraparound_handler(), argument]
    1806: prop = &hdev->kernel_queues[q_idx].sync_stream_prop;  [in hl_cs_signal_sob_wraparound_handler()]
    1829: q_idx);  [in hl_cs_signal_sob_wraparound_handler()]
    1870: prop->curr_sob_offset, q_idx);  [in hl_cs_signal_sob_wraparound_handler()]
    1938: enum hl_queue_type q_type, u32 q_idx, u32 encaps_signal_offset)  [in cs_ioctl_signal_wait_create_jobs(), argument]
    1973: job->hw_queue_id = q_idx;  [in cs_ioctl_signal_wait_create_jobs()]
    2002: u32 q_idx, u32 count,  [in cs_ioctl_reserve_signals(), argument]
    2022: if (q_idx >= hdev->asic_prop.max_queues) {  [in cs_ioctl_reserve_signals()]
    2024: q_idx);  [in cs_ioctl_reserve_signals()]
    [all …]
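
The cs_ioctl_reserve_signals() hit at line 2022 is worth pausing on: there q_idx arrives from userspace via an ioctl, so it is checked against the ASIC's queue count before any kernel array is indexed with it. A reduced sketch of that untrusted-input guard (struct trimmed to the one field read; the real check uses hdev->asic_prop.max_queues):

    #include <errno.h>

    struct queue_props {
            unsigned int max_queues;     /* queues this ASIC exposes */
    };

    int reserve_signals(const struct queue_props *props, unsigned int q_idx)
    {
            /* Untrusted index from an ioctl: reject, never clamp. */
            if (q_idx >= props->max_queues)
                    return -EINVAL;

            /* ... q_idx is now safe to use as an array index ... */
            return 0;
    }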

/linux/drivers/net/ethernet/intel/ice/virt/

queues.c
     186: void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)  [in ice_vf_ena_txq_interrupt(), argument]
     189: u32 pfq = vsi->txq_map[q_idx];  [in ice_vf_ena_txq_interrupt()]
     209: void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)  [in ice_vf_ena_rxq_interrupt(), argument]
     212: u32 pfq = vsi->rxq_map[q_idx];  [in ice_vf_ena_rxq_interrupt()]
     756: int i = -1, q_idx;  [in ice_vc_cfg_qs_msg(), local]
     801: q_idx = qpi->rxq.queue_id;  [in ice_vc_cfg_qs_msg()]
     806: if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {  [in ice_vc_cfg_qs_msg()]
     812: vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;  [in ice_vc_cfg_qs_msg()]
     813: vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;  [in ice_vc_cfg_qs_msg()]
     816: if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))  [in ice_vc_cfg_qs_msg()]
     [all …]

virtchnl.h
      95: void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
      96: void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
     106: static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }  [argument]
     107: static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }  [argument]

/linux/drivers/net/ethernet/microsoft/mana/

mana_bpf.c
      36: u16 q_idx)  [in mana_xdp_xmit_fm(), argument]
      44: skb_set_queue_mapping(skb, q_idx);  [in mana_xdp_xmit_fm()]
      57: u16 q_idx;  [in mana_xdp_xmit(), local]
      62: q_idx = smp_processor_id() % ndev->real_num_tx_queues;  [in mana_xdp_xmit()]
      65: if (mana_xdp_xmit_fm(ndev, frames[i], q_idx))  [in mana_xdp_xmit()]
      71: tx_stats = &apc->tx_qp[q_idx].txq.stats;  [in mana_xdp_xmit()]
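
Line 62 is the whole queue-selection policy for this XDP transmit path: the sending CPU's id modulo the real Tx queue count picks the queue, spreading concurrent CPUs across queues with no shared state (the hyperv netvsc XDP path below uses the same line). A sketch of the mapping:

    /* Stand-in for the smp_processor_id() % real_num_tx_queues pick.
     * Assumes at least one Tx queue, as the drivers guarantee. */
    unsigned short pick_xdp_txq(unsigned int cpu_id,
                                unsigned short real_num_tx_queues)
    {
            return (unsigned short)(cpu_id % real_num_tx_queues);
    }

On a 4-queue device, CPUs 0..7 land on queues 0,1,2,3,0,1,2,3; a whole frame batch from one CPU therefore stays on one queue, which keeps its completions ordered.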

/linux/drivers/net/hyperv/

netvsc_bpf.c
     230: struct xdp_frame *frame, u16 q_idx)  [in netvsc_ndoxdp_xmit_fm(), argument]
     240: skb_record_rx_queue(skb, q_idx);  [in netvsc_ndoxdp_xmit_fm()]
     256: u16 q_idx;  [in netvsc_ndoxdp_xmit(), local]
     276: q_idx = smp_processor_id() % ndev->real_num_tx_queues;  [in netvsc_ndoxdp_xmit()]
     279: if (netvsc_ndoxdp_xmit_fm(ndev, frames[i], q_idx))  [in netvsc_ndoxdp_xmit()]
     285: tx_stats = &nvsc_dev->chan_table[q_idx].tx_stats;  [in netvsc_ndoxdp_xmit()]

netvsc.c
     321: int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)  [in netvsc_alloc_recv_comp_ring(), argument]
     323: struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];  [in netvsc_alloc_recv_comp_ring()]
     765: u16 q_idx = 0;  [in netvsc_send_tx_complete(), local]
     786: q_idx = packet->q_idx;  [in netvsc_send_tx_complete()]
     788: tx_stats = &net_device->chan_table[q_idx].tx_stats;  [in netvsc_send_tx_complete()]
     800: atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);  [in netvsc_send_tx_complete()]
     806: struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);  [in netvsc_send_tx_complete()]
    1103: &net_device->chan_table[packet->q_idx];  [in netvsc_send_pkt()]
    1107: struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);  [in netvsc_send_pkt()]
    1248: nvchan = &net_device->chan_table[packet->q_idx];  [in netvsc_send()]
    [all …]

netvsc_drv.c
     255: int q_idx;  [in netvsc_get_tx_queue(), local]
     257: q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &  [in netvsc_get_tx_queue()]
     261: if (q_idx != old_idx &&  [in netvsc_get_tx_queue()]
     263: sk_tx_queue_set(sk, q_idx);  [in netvsc_get_tx_queue()]
     265: return q_idx;  [in netvsc_get_tx_queue()]
     281: int q_idx = sk_tx_queue_get(skb->sk);  [in netvsc_pick_tx(), local]
     283: if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {  [in netvsc_pick_tx()]
     288: q_idx = skb_get_rx_queue(skb);  [in netvsc_pick_tx()]
     290: q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);  [in netvsc_pick_tx()]
     293: return q_idx;  [in netvsc_pick_tx()]
     [all …]
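
netvsc_pick_tx() layers a cache on top of the hash lookup: the queue recorded on the socket is reused unless it is unset, stale against the current queue count, or the skb is marked ooo_okay, the one moment a flow may migrate queues without risking reordering. A condensed sketch of that decision (the fresh-pick path, rx-queue recall or hash-and-table lookup in the real driver, is stubbed):

    #include <stdbool.h>

    static int pick_fresh_queue(void)    /* stub for the slow path */
    {
            return 0;
    }

    int pick_tx_queue(int cached_q_idx, bool ooo_okay,
                      int real_num_tx_queues)
    {
            if (cached_q_idx < 0 ||                  /* nothing cached yet */
                ooo_okay ||                          /* safe point to migrate */
                cached_q_idx >= real_num_tx_queues)  /* stale after reconfig */
                    return pick_fresh_queue();

            return cached_q_idx;                     /* fast path: sticky queue */
    }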

/linux/drivers/net/ethernet/cavium/thunder/

nicvf_queues.h
     354: void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
     355: void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
     356: void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
     357: int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

nicvf_queues.c
    1722: static u64 nicvf_int_type_to_mask(int int_type, int q_idx)  [in nicvf_int_type_to_mask(), argument]
    1728: reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);  [in nicvf_int_type_to_mask()]
    1731: reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);  [in nicvf_int_type_to_mask()]
    1734: reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);  [in nicvf_int_type_to_mask()]
    1756: void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)  [in nicvf_enable_intr(), argument]
    1758: u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  [in nicvf_enable_intr()]
    1770: void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)  [in nicvf_disable_intr(), argument]
    1772: u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  [in nicvf_disable_intr()]
    1784: void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)  [in nicvf_clear_intr(), argument]
    1786: u64 mask = nicvf_int_type_to_mask(int_type, q_idx);  [in nicvf_clear_intr()]
    [all …]
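
nicvf_int_type_to_mask() shows the per-queue interrupt encoding: one wide register carries a bitfield per interrupt type, and bit q_idx within the type's field selects the queue, so enable, disable, and clear all reduce to the same mask computation. A compilable sketch (the shift values are illustrative, not the real NICVF_INTR_* constants):

    #include <stdint.h>

    enum intr_type { INTR_CQ, INTR_SQ, INTR_RBDR };

    /* Illustrative field offsets within the interrupt register. */
    #define INTR_CQ_SHIFT    0
    #define INTR_SQ_SHIFT    8
    #define INTR_RBDR_SHIFT 16

    uint64_t int_type_to_mask(enum intr_type type, int q_idx)
    {
            switch (type) {
            case INTR_CQ:
                    return (1ULL << q_idx) << INTR_CQ_SHIFT;
            case INTR_SQ:
                    return (1ULL << q_idx) << INTR_SQ_SHIFT;
            case INTR_RBDR:
                    return (1ULL << q_idx) << INTR_RBDR_SHIFT;
            }
            return 0;
    }

With these offsets, int_type_to_mask(INTR_SQ, 3) yields bit 11: queue 3 inside the send-queue field. The same mask then feeds the enable, disable, and clear register writes.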

/linux/drivers/net/ethernet/ti/icssg/

icssg_common.c
      97: unsigned int q_idx)  [in emac_xsk_xmit_zc(), argument]
      99: struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];  [in emac_xsk_xmit_zc()]
     141: (emac->port_id | (q_idx << 8)));  [in emac_xsk_xmit_zc()]
     689: unsigned int q_idx,  [in emac_xmit_xdp_frame(), argument]
     702: if (q_idx >= PRUETH_MAX_TX_QUEUES) {  [in emac_xmit_xdp_frame()]
     703: netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);  [in emac_xmit_xdp_frame()]
     707: tx_chn = &emac->tx_chns[q_idx];  [in emac_xmit_xdp_frame()]
     742: cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));  [in emac_xmit_xdp_frame()]
     788: int q_idx, err;  [in emac_run_xdp(), local]
     803: q_idx = cpu % emac->tx_ch_num;  [in emac_run_xdp()]
     [all …]
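
Lines 141 and 742 pack two values into one descriptor tag, port_id | (q_idx << 8), so the completion side can recover both the port and the Tx queue from a single word. A sketch of that packing under the byte-wide field layout the expression implies (the real field widths are an assumption here):

    #include <stdint.h>

    /* Assumed layout: port id in the low byte, queue index in the next. */
    uint32_t pack_tag(uint8_t port_id, uint8_t q_idx)
    {
            return (uint32_t)port_id | ((uint32_t)q_idx << 8);
    }

    uint8_t tag_port(uint32_t tag)  { return tag & 0xff; }
    uint8_t tag_q_idx(uint32_t tag) { return (tag >> 8) & 0xff; }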

/linux/drivers/net/ethernet/meta/fbnic/

fbnic_txrx.c
     118: return netdev_get_tx_queue(dev, ring->q_idx);  [in txring_txq()]
    1221: skb_record_rx_queue(skb, rcq->q_idx);  [in fbnic_populate_skb_fields()]
    1472: WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);  [in fbnic_remove_tx_ring()]
    1473: fbn->tx[txr->q_idx] = NULL;  [in fbnic_remove_tx_ring()]
    1485: WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);  [in fbnic_remove_xdp_ring()]
    1486: fbn->tx[xdpr->q_idx] = NULL;  [in fbnic_remove_xdp_ring()]
    1498: WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);  [in fbnic_remove_rx_ring()]
    1499: fbn->rx[rxr->q_idx] = NULL;  [in fbnic_remove_rx_ring()]
    1606: int q_idx, u8 flags)  [in fbnic_ring_init(), argument]
    1610: ring->q_idx = q_idx;  [in fbnic_ring_init()]
    [all …]

/linux/drivers/net/ethernet/intel/fm10k/

fm10k_pf.c
     503: u16 vsi, queue, pc, q_idx;  [in fm10k_configure_dglort_map_pf(), local]
     520: q_idx = dglort->queue_b;  [in fm10k_configure_dglort_map_pf()]
     524: for (queue = 0; queue < queue_count; queue++, q_idx++) {  [in fm10k_configure_dglort_map_pf()]
     525: if (q_idx >= FM10K_MAX_QUEUES)  [in fm10k_configure_dglort_map_pf()]
     528: fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);  [in fm10k_configure_dglort_map_pf()]
     529: fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);  [in fm10k_configure_dglort_map_pf()]
     539: q_idx = pc + dglort->queue_b;  [in fm10k_configure_dglort_map_pf()]
     541: if (q_idx >= FM10K_MAX_QUEUES)  [in fm10k_configure_dglort_map_pf()]
     544: txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));  [in fm10k_configure_dglort_map_pf()]
     547: fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);  [in fm10k_configure_dglort_map_pf()]
     [all …]

/linux/drivers/net/ethernet/intel/iavf/

iavf_main.c
    1175: int q_idx;  [in iavf_napi_enable_all(), local]
    1179: for (q_idx = 0; q_idx < q_vectors; q_idx++) {  [in iavf_napi_enable_all()]
    1182: q_vector = &adapter->q_vectors[q_idx];  [in iavf_napi_enable_all()]
    1194: int q_idx;  [in iavf_napi_disable_all(), local]
    1198: for (q_idx = 0; q_idx < q_vectors; q_idx++) {  [in iavf_napi_disable_all()]
    1199: q_vector = &adapter->q_vectors[q_idx];  [in iavf_napi_disable_all()]
    1786: int q_idx = 0, num_q_vectors, irq_num;  [in iavf_alloc_q_vectors(), local]
    1794: for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {  [in iavf_alloc_q_vectors()]
    1795: irq_num = adapter->msix_entries[q_idx + NONQ_VECS].vector;  [in iavf_alloc_q_vectors()]
    1796: q_vector = &adapter->q_vectors[q_idx];  [in iavf_alloc_q_vectors()]
    [all …]

/linux/net/sched/

sch_api.c
    1828: int ret = 0, q_idx = *q_idx_p;  [in tc_dump_qdisc_root(), local]
    1836: if (q_idx < s_q_idx) {  [in tc_dump_qdisc_root()]
    1837: q_idx++;  [in tc_dump_qdisc_root()]
    1844: q_idx++;  [in tc_dump_qdisc_root()]
    1857: if (q_idx < s_q_idx) {  [in tc_dump_qdisc_root()]
    1858: q_idx++;  [in tc_dump_qdisc_root()]
    1866: q_idx++;  [in tc_dump_qdisc_root()]
    1870: *q_idx_p = q_idx;  [in tc_dump_qdisc_root()]
    1880: int idx, q_idx;  [in tc_dump_qdisc(), local]
    1888: s_q_idx = q_idx = cb->args[1];  [in tc_dump_qdisc()]
    [all …]
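
This is the standard resumable netlink dump: a dump callback can fill its buffer mid-walk, so entries below s_q_idx (the position saved in cb->args[1] by the previous callback) are skipped, and the index reached is written back for the next callback to resume at. A toy sketch of the skip-and-resume loop, with the message emission stubbed by a countdown budget:

    #include <stdbool.h>

    static int budget = 3;                   /* toy stand-in for skb space */

    static bool emit_entry(int q_idx)        /* false once the buffer is full */
    {
            (void)q_idx;
            return budget-- > 0;
    }

    /* Returns the index for the next dump callback to resume from. */
    int dump_queues(int count, int s_q_idx)
    {
            int q_idx;

            for (q_idx = 0; q_idx < count; q_idx++) {
                    if (q_idx < s_q_idx)
                            continue;        /* already dumped last time */
                    if (!emit_entry(q_idx))
                            break;           /* buffer full: stop here */
            }
            return q_idx;
    }

Calling dump_queues(10, 0) emits queues 0..2 and returns 3; the next callback calls dump_queues(10, 3) and carries on where the first left off.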

/linux/net/core/

netdev-genl.c
     391: u32 q_idx, u32 q_type, const struct genl_info *info)  [in netdev_nl_queue_fill_one(), argument]
     402: if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||  [in netdev_nl_queue_fill_one()]
     409: rxq = __netif_get_rx_queue(netdev, q_idx);  [in netdev_nl_queue_fill_one()]
     425: txq = netdev_get_tx_queue(netdev, q_idx);  [in netdev_nl_queue_fill_one()]
     461: netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,  [in netdev_nl_queue_fill(), argument]
     469: err = netdev_nl_queue_validate(netdev, q_idx, q_type);  [in netdev_nl_queue_fill()]
     473: return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);  [in netdev_nl_queue_fill()]

/linux/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/

trx.c
     534: u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)  [in rtl92ee_get_available_desc(), argument]
     542: get_desc_addr_fr_q_idx(q_idx));  [in rtl92ee_get_available_desc()]
     889: u8 q_idx = *val;  [in rtl92ee_set_desc(), local]
     900: struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[q_idx];  [in rtl92ee_set_desc()]
     903: if (q_idx == BEACON_QUEUE) {  [in rtl92ee_set_desc()]
     914: get_desc_addr_fr_q_idx(q_idx),  [in rtl92ee_set_desc()]