/linux/drivers/net/wwan/t7xx/

  t7xx_hif_dpmaif_rx.c
       84  t7xx_dpmaif_update_bat_wr_idx()         local
             84: struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
             85: struct dpmaif_bat_request *bat_req = rxq->bat_req;
             88: if (!rxq->que_started) {
             89:         dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
      236  t7xx_dpmaifq_release_pit_entry()        argument
            243: if (!rxq->que_started)
            246: if (rel_entry_num >= rxq->pit_size_cnt) {
      276  t7xx_frag_bat_cur_bid_check()           argument
      393  t7xx_dpmaif_set_frag_to_skb()           argument
      425  t7xx_dpmaif_get_frag()                  argument
      446  t7xx_bat_cur_bid_check()                argument
      462  t7xx_dpmaif_check_pit_seq()             argument
      500  t7xx_dpmaif_release_bat_entry()         argument
      556  t7xx_dpmaif_pit_release_and_add()       argument
      571  t7xx_dpmaif_bat_release_and_add()       argument
      593  t7xx_dpmaif_frag_bat_release_and_add()  argument
      611  t7xx_dpmaif_parse_msg_pit()             argument
      623  t7xx_dpmaif_set_data_to_skb()           argument
      659  t7xx_dpmaif_get_rx_pkt()                argument
      680  t7xx_dpmaifq_rx_notify_hw()             argument
      694  t7xx_dpmaif_rx_skb()                    argument
      718  t7xx_dpmaif_rx_start()                  argument
      799  t7xx_dpmaifq_poll_pit()                 argument
      817  t7xx_dpmaif_napi_rx_data_collect()      local
      834  t7xx_dpmaif_napi_rx_poll()              local
      892  t7xx_dpmaif_irq_rx_done()               local
     1007  t7xx_dpmaif_rx_alloc()                  argument
     1031  t7xx_dpmaif_rx_buf_free()               argument
     1064  t7xx_dpmaif_bat_release_work()          local
     1117  t7xx_dpmaif_rx_stop()                   local
     1131  t7xx_dpmaif_stop_rxq()                  argument
     [all...]

  t7xx_hif_dpmaif.c
      227  t7xx_dpmaif_rxtx_sw_allocs()
      267  t7xx_dpmaif_rxtx_sw_allocs()
      294  t7xx_dpmaif_sw_release()
      303  t7xx_dpmaif_start()                     local
            315: rxq->que_started = true;
            316: rxq->index = i;
      432  t7xx_dpmaif_start_txrx_qs()             local
     [all...]
/linux/drivers/net/ethernet/huawei/hinic/

  hinic_rx.c
       53  hinic_rxq_clean_stats()     argument   (kernel-doc 51: "@rxq: Logical Rx Queue")
       71  hinic_rxq_get_stats()       argument   (kernel-doc 68: "@rxq: Logical Rx Queue")
       91  rxq_stats_init()            argument   (kernel-doc 89: "@rxq: Logical Rx Queue")
       99  rx_csum()                   argument
      127  rx_alloc_skb()              argument
      163  rx_unmap_skb()              argument
      180  rx_free_skb()               argument
      193  rx_alloc_pkts()             argument
      243  free_all_rx_skbs()          argument
      271  rx_recv_jumbo_pkt()         argument
      354  rxq_recv()                  argument
      459  rx_poll()                   local
      478  rx_add_napi()               argument
      487  rx_del_napi()               argument
      495  rx_irq()                    local
      513  rx_request_irq()            argument
      563  rx_free_irq()               argument
      580  hinic_init_rxq()            argument
      623  hinic_clean_rxq()           argument
     [all...]

  hinic_rx.h
       44  void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
       46  int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
                              struct net_device *netdev);
       49  void hinic_clean_rxq(struct hinic_rxq *rxq);
/linux/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/

  rx.c
       56  comment: "+ A list of pre-allocated RBDs is stored in iwl->rxq->rx_free."
       59  comment: "or reused - added immediately to the iwl->rxq->rx_free list."
       67  comment: "add them to iwl->rxq->rx_free. If it fails - it continues to claim them"
       77  comment: "detached from the iwl->rxq. The driver 'processed' index is updated."
       78  comment: "+ If there are no allocated buffers in iwl->rxq->rx_free,"
      105  comment: "rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue"
      120  iwl_rxq_space()                  argument
      168  iwl_pcie_rxq_inc_wr_ptr()        argument
      210  iwl_pcie_rxq_check_wrptr()       local
      222  iwl_pcie_restock_bd()            argument
      246  iwl_pcie_rxmq_restock()          argument
      293  iwl_pcie_rxsq_restock()          argument
      350  iwl_pcie_rxq_restock()           argument
      431  iwl_pcie_rxq_alloc_rbs()         argument
      615  iwl_pcie_rx_allocator_get()      argument
      680  iwl_pcie_free_rxq_dma()          argument
      715  iwl_pcie_alloc_rxq_dma()         argument
      758  iwl_pcie_alloc_rxq_dma()         local
      808  iwl_pcie_rx_alloc()              local
      836  iwl_pcie_rx_hw_init()            argument
      991  iwl_pcie_rx_init_rxb_lists()     argument
     1010  iwl_pcie_napi_poll()             local
     1037  iwl_pcie_napi_poll_msix()        local
     1076  iwl_pcie_rx_napi_sync()          local
     1113  _iwl_pcie_rx_init()              local
     1234  iwl_pcie_rx_free()               local
     1251  iwl_pcie_rx_move_to_allocator()  argument
     1267  iwl_pcie_rx_reuse_rbd()          argument
     1298  iwl_pcie_rx_handle_rb()          argument
     1447  iwl_pcie_get_rxb()               argument
     1504  iwl_pcie_rx_handle()             local
     1647  iwl_pcie_irq_rx_msix_handler()   local
     [all...]
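  The comment fragments above (lines 56-105) sketch iwlwifi's receive-buffer
  lifecycle: RBDs circulate pool -> rx_used -> rx_free -> queue, with an
  allocator giving used buffers fresh pages and the interrupt path recycling
  claimed buffers back to the used list. A minimal userspace model of that
  cycle, assuming simplified states in place of the driver's linked lists --
  every type and name below is invented for illustration, not iwlwifi's own:

    /* Model of the RBD flow described in the rx.c comment:
     * buffers cycle used -> free -> queued -> (claimed) -> used. */
    #include <stdio.h>

    #define NUM_RBDS 8

    enum rbd_state { RBD_USED, RBD_FREE, RBD_QUEUED };

    struct rbd {
        int id;
        enum rbd_state state;   /* which list the buffer would be on */
        int has_page;           /* "allocated" backing storage?      */
    };

    static struct rbd pool[NUM_RBDS];

    /* Allocator: give rx_used entries a page, move them to rx_free. */
    static void replenish(void)
    {
        for (int i = 0; i < NUM_RBDS; i++)
            if (pool[i].state == RBD_USED) {
                pool[i].has_page = 1;        /* pretend page allocation */
                pool[i].state = RBD_FREE;
            }
    }

    /* Restock: hand rx_free buffers to the "hardware" queue. */
    static void restock(void)
    {
        for (int i = 0; i < NUM_RBDS; i++)
            if (pool[i].state == RBD_FREE)
                pool[i].state = RBD_QUEUED;
    }

    /* Interrupt path: claim a filled buffer, process it, then recycle
     * it to rx_used so the allocator can attach a fresh page. */
    static void handle_rx(int id)
    {
        if (pool[id].state != RBD_QUEUED)
            return;
        printf("processed RBD %d\n", id);
        pool[id].has_page = 0;
        pool[id].state = RBD_USED;
    }

    int main(void)
    {
        for (int i = 0; i < NUM_RBDS; i++)
            pool[i] = (struct rbd){ .id = i, .state = RBD_USED };

        replenish();      /* no pages yet: fill rx_free              */
        restock();        /* rx_free -> queue, "hardware" may DMA    */
        handle_rx(3);     /* claim one buffer back into rx_used      */
        replenish();      /* ...and the cycle repeats                */
        restock();
        return 0;
    }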
/linux/drivers/net/ethernet/huawei/hinic3/

  hinic3_rx.c
       73  hinic3_rx_fill_buffers()     argument
             81: rx_info = &rxq->rx_info[rxq->next_to_update];
             83: err = rx_alloc_mapped_page(rxq->page_pool, rx_info,
             84:                            rxq->buf_len);
      105  hinic3_add_rx_frag()         argument
      132  packaging_skb()              argument
      160  hinic3_get_sge_num()         argument
      170  hinic3_fetch_rx_buffer()     argument
      215  hinic3_rx_csum()             argument
      266  recv_one_pkt()               argument
      302  hinic3_rx_poll()             argument
     [all...]
/linux/drivers/infiniband/hw/hfi1/

  netdev_rx.c
      194  hfi1_netdev_rxq_init()
            194: rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
            197: if (!rx->rxq) {
      203  hfi1_netdev_rxq_init()       local
      230  hfi1_netdev_rxq_init()       local
      250  hfi1_netdev_rxq_deinit()     local
      268  enable_queues()              local
      286  disable_queues()             local
     [all...]

  vnic_main.c
      292  hfi1_vnic_decap_skb()        argument
      336  hfi1_vnic_bypass_rcv()       local
      599  hfi1_vnic_alloc_rn()         local
     [all...]
/linux/drivers/net/ethernet/qlogic/qede/

  qede_fp.c
       27  qede_alloc_rx_buffer()       argument
             38: if (allow_lazy && likely(rxq->filled_buffers > 12)) {
             39:         rxq->filled_buffers--;
      502  qede_has_rx_work()           argument
      515  qede_rx_bd_ring_consume()    argument
      524  qede_reuse_page()            argument
      547  qede_recycle_rx_bd_ring()    argument
      558  qede_realloc_rx_buffer()     argument
      588  qede_update_rx_prod()        argument
      639  qede_skb_receive()           argument
      665  qede_fill_frag_skb()         argument
      740  qede_build_skb()             argument
      760  qede_tpa_rx_build_skb()      argument
      791  qede_rx_build_skb()          argument
      832  qede_tpa_start()             argument
      958  qede_tpa_cont()              argument
      976  qede_tpa_end()               local
     1081  qede_rx_xdp()                argument
     1170  qede_rx_build_jumbo()        argument
     1225  qede_rx_process_tpa_cqe()    argument
     1245  qede_rx_process_cqe()        argument
     1341  qede_rx_int()                local
     [all...]
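  The qede_alloc_rx_buffer() fragment quoted above shows a lazy-refill fast
  path: while more than a dozen buffers are still posted, the function just
  spends one unit of that credit instead of allocating and DMA-mapping a new
  page. A standalone sketch of the same heuristic -- the struct and helper
  names are made up, and likely() is dropped since this is userspace; only
  the threshold logic follows the driver:

    #include <stdbool.h>
    #include <stdio.h>

    struct rx_queue_model {
        unsigned int filled_buffers;
    };

    static int alloc_rx_buffer(struct rx_queue_model *rxq, bool allow_lazy)
    {
        if (allow_lazy && rxq->filled_buffers > 12) {
            rxq->filled_buffers--;   /* consume credit, defer the real work */
            return 0;
        }
        /* slow path: allocate + map a page, post a new descriptor */
        printf("allocating a fresh buffer (filled=%u)\n", rxq->filled_buffers);
        rxq->filled_buffers++;
        return 0;
    }

    int main(void)
    {
        struct rx_queue_model rxq = { .filled_buffers = 20 };

        alloc_rx_buffer(&rxq, true);   /* lazy: credit drops to 19        */
        rxq.filled_buffers = 5;
        alloc_rx_buffer(&rxq, true);   /* below threshold: real allocation */
        return 0;
    }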
  qede_main.c
      964  qede_free_fp_array()
            964: if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
            965:         xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
            966: kfree(fp->rxq);
     1035  qede_alloc_fp_array()
           1035: fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
     1528  qede_free_rx_buffers()       argument
     1547  qede_free_mem_rxq()          argument
     1560  qede_set_tpa_param()         argument
     1572  qede_alloc_mem_rxq()         argument
     2287  qede_start_queues()          local
     [all...]
/linux/net/core/

  netdev_rx_queue.c
       14  netdev_rx_queue_restart()            local
             39: err = page_pool_check_memory_provider(dev, rxq);
       94  __net_mp_open_rxq()                  local
            120: if (rxq->mp_params.mp_ops) {
            125: if (rxq->pool) {
            131: rxq->mp_params = *p;
      154  __net_mp_close_rxq()                 local
     [all...]

  devmem.c
      120  net_devmem_unbind_dmabuf()           local
            134: xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
            140:         rxq_idx = get_netdev_rx_queue_index(rxq);
      156  net_devmem_bind_dmabuf_to_queue()    local
      460  mp_dmabuf_devmem_nl_fill()           argument
      469  mp_dmabuf_devmem_uninstall()         argument
     [all...]
/linux/drivers/net/ethernet/microsoft/mana/

  mana_en.c
      587  mana_get_rxbuf_pre()         argument
      1510  mana_fence_rq()             argument
      1552  mana_fence_rqs()            local
      1713  mana_post_pkt_rxq()         argument
      1733  mana_build_skb()            argument
      1758  mana_rx_skb()               argument
      1858  mana_get_rxfrag()           argument
      1894  mana_refill_rx_oob()        argument
      1916  mana_process_rx_cqe()       argument
      1986  mana_poll_rx_cq()           local
      2277  mana_destroy_rxq()          argument
      2340  mana_fill_rx_oob()          argument
      2369  mana_alloc_rx_wqe()         argument
      2408  mana_push_wqe()             argument
      2426  mana_create_page_pool()     argument
      2460  mana_create_rxq()           local
      2582  mana_create_rxq_debugfs()   local
      2606  mana_add_rx_queues()        local
      2633  mana_destroy_vport()        local
     [all...]

  mana_bpf.c
       80  mana_run_xdp()               argument
             88: prog = rcu_dereference(rxq->bpf_prog);
             93: xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
             98: rx_stats = &rxq->stats;
            107: rxq->xdp_rc = xdp_do_redirect(ndev, xdp, prog);
            108: if (!rxq->xdp_rc) {
            109:         rxq->xdp_flush = true;
/linux/drivers/net/ethernet/marvell/

  mvneta.c
      109  #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq)  ((rxq) << ((prio) * 3))
      134  #define MVNETA_CPU_RXQ_ACCESS(rxq)           BIT(rxq)
      857  mvneta_rxq_non_occup_desc_add()   argument
      876  mvneta_rxq_busy_desc_num_get()    argument
      888  mvneta_rxq_desc_num_update()      argument
      922  mvneta_rxq_next_desc_get()        argument
      946  mvneta_rxq_offset_set()           argument
     1003  mvneta_rxq_buf_size_set()         argument
     1018  mvneta_rxq_bm_disable()           argument
     1029  mvneta_rxq_bm_enable()            argument
     1040  mvneta_rxq_long_pool_set()        argument
     1053  mvneta_rxq_short_pool_set()       argument
     1269  mvneta_port_up()                  local
     1498  mvneta_defaults_set()             local
     1686  mvneta_rx_pkts_coal_set()         argument
     1696  mvneta_rx_time_coal_set()         argument
     1724  mvneta_rx_desc_fill()             argument
     1937  mvneta_rx_refill()                argument
     1987  mvneta_rxq_drop_pkts()            argument
     2041  mvneta_rx_refill_queue()          argument
     2072  mvneta_xdp_put_buff()             argument
     2257  mvneta_run_xdp()                  argument
     2316  mvneta_swbm_rx_frame()            argument
     2351  mvneta_swbm_add_rx_fragment()     argument
     2430  mvneta_rx_swbm()                  argument
     2543  mvneta_rx_hwbm()                  argument
     3351  mvneta_create_page_pool()         argument
     3394  mvneta_rxq_fill()                 argument
     3444  mvneta_rxq_sw_init()              argument
     3461  mvneta_rxq_hw_init()              argument
     3494  mvneta_rxq_init()                 argument
     3510  mvneta_rxq_deinit()               argument
     4386  mvneta_percpu_elect()             local
     4730  mvneta_ethtool_set_coalesce()     local
     5169  mvneta_map_vlan_prio_to_rxq()     argument
     5246  mvneta_setup_mqprio()             local
     5393  mvneta_init()                     local
     5821  mvneta_suspend()                  local
     5873  mvneta_resume()                   local
     [all...]
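  MVNETA_VLAN_PRIO_RXQ_MAP() at line 109 packs one 3-bit RX-queue number per
  VLAN priority, so all eight priorities fit in 24 bits of a single register.
  A quick standalone demonstration of that packing -- the macro body is copied
  from the hit above, everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* same shift as MVNETA_VLAN_PRIO_RXQ_MAP: 3 bits per priority */
    #define VLAN_PRIO_RXQ_MAP(prio, rxq)  ((uint32_t)(rxq) << ((prio) * 3))

    int main(void)
    {
        uint32_t reg = 0;

        /* steer priority 0 to queue 1, priority 5 to queue 7 */
        reg |= VLAN_PRIO_RXQ_MAP(0, 1);
        reg |= VLAN_PRIO_RXQ_MAP(5, 7);

        for (int prio = 0; prio < 8; prio++)
            printf("prio %d -> rxq %u\n", prio, (reg >> (prio * 3)) & 0x7);
        return 0;
    }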
  mv643xx_eth.c
      399  struct rx_queue rxq[8];           member
      440  comment: "/* rxq/txq helper functions */"
      441  rxq_to_mp()                       argument
            443: return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
      451  rxq_enable()                      argument
            454: wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
      457  rxq_disable()                     argument
      506  rxq_process()                     argument
      598  rxq_refill()                      argument
     1937  rxq_init()                        local
     2002  rxq_deinit()                      argument
     2380  port_start()                      local
     [all...]
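  rxq_to_mp() quoted above recovers the parent mv643xx_eth_private from a
  pointer to one element of its embedded rxq[] array. The same container_of()
  idiom demonstrated standalone -- here the element index is subtracted
  explicitly to reach the array base, whereas the kernel version does it in
  one step with the rxq[rxq->index] member designator; all names below are
  illustrative:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified container_of: no type-checking, unlike the kernel's */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rx_queue { int index; };

    struct eth_private {
        const char *name;
        struct rx_queue rxq[8];
    };

    static struct eth_private *rxq_to_priv(struct rx_queue *rxq)
    {
        struct rx_queue *base = rxq - rxq->index;   /* &priv->rxq[0] */
        return container_of(base, struct eth_private, rxq);
    }

    int main(void)
    {
        struct eth_private priv = { .name = "eth0" };

        for (int i = 0; i < 8; i++)
            priv.rxq[i].index = i;

        struct rx_queue *q = &priv.rxq[5];
        printf("queue 5 belongs to %s\n", rxq_to_priv(q)->name);
        return 0;
    }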
/linux/drivers/net/ethernet/atheros/alx/

  main.c
       73  alx_refill_rx_ring()          local
             73: struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
             79: next = cur = rxq->write_idx;
             84: while (!cur_buf->skb && next != rxq->read_idx) {
      216  alx_clean_rx_irq()            argument
      506  alx_free_rxring_buf()         argument
      630  alx_alloc_rx_ring()           argument
      741  alx_alloc_napis()             local
     [all...]
/linux/drivers/net/ethernet/intel/idpf/

  idpf_txrx.c
      445  idpf_rx_buf_rel_all()            argument   (kernel-doc 443: "@rxq: queue to be cleaned")
      471  idpf_rx_desc_rel()               argument
      706  idpf_rx_buf_alloc_singleq()      argument
      725  idpf_rx_bufs_init_singleq()      argument
      864  idpf_rx_desc_alloc()             argument
     2986  idpf_rx_hash()                   argument
     3011  idpf_rx_csum()                   argument
     3102  idpf_rx_rsc()                    argument
     3169  idpf_rx_hwtstamp()               argument
     3200  idpf_rx_process_skb_fields()     argument
     3359  idpf_rx_splitq_clean()           argument
     3846  idpf_net_dim()                   local
     4133  idpf_rx_splitq_clean_all()       local
     [all...]
/linux/drivers/net/ethernet/hisilicon/

  hisi_femac.c
      121  struct hisi_femac_queue rxq;  member
      212  hisi_femac_rx_refill()        local
            218: pos = rxq->head;
            220: if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
            237: rxq->dma_phys[pos] = addr;
      248  hisi_femac_rx()               local
      388  hisi_femac_free_skb_rings()   local
     [all...]
/linux/tools/testing/selftests/bpf/

  xdp_hw_metadata.c
       73  int rxq;                      variable
      444  verify_metadata()             argument
            447: struct pollfd fds[rxq + 1];
            461: fds[rxq].fd = server_fd;
            462: fds[rxq].events = POLLIN;
            463: fds[rxq].revents = 0;
            474: ret = poll(fds, rxq + 1, 1000);
     [all...]
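  verify_metadata() above sizes its pollfd array as rxq + 1: one slot per
  AF_XDP socket plus the TCP server descriptor in the last slot. The same
  N+1 poll layout, runnable with pipes standing in for the sockets (RXQ and
  all the plumbing here are stand-ins, not the selftest's setup):

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    #define RXQ 3   /* pretend number of RX queues */

    int main(void)
    {
        struct pollfd fds[RXQ + 1];
        int pipes[RXQ + 1][2];

        for (int i = 0; i <= RXQ; i++) {
            if (pipe(pipes[i]) < 0)
                return 1;
            fds[i].fd = pipes[i][0];     /* slot RXQ = "server fd" */
            fds[i].events = POLLIN;
            fds[i].revents = 0;
        }

        /* make "queue 1" readable, as if a packet arrived */
        write(pipes[1][1], "x", 1);

        if (poll(fds, RXQ + 1, 1000) > 0)
            for (int i = 0; i <= RXQ; i++)
                if (fds[i].revents & POLLIN)
                    printf("fd slot %d ready%s\n", i,
                           i == RXQ ? " (server fd)" : "");
        return 0;
    }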
/linux/drivers/bluetooth/

  btintel_pcie.c
      327  btintel_pcie_dump_debug_registers()
            327: snprintf(buf, sizeof(buf), "rxq: cr_tia: %u cr_hia: %u", cr_tia, cr_hia);
      433  btintel_pcie_prepare_rx()        argument
            439: buf = &rxq->bufs[frbd_index];
            441: frbd = &rxq->frbds[frbd_index];
      452  btintel_pcie_submit_rx()         local
            456: if (frbd_index > rxq->count)
      477  btintel_pcie_start_rx()          local
     1441  btintel_pcie_msix_rx_handle()    local
     1753  btintel_pcie_free_rxq_bufs()     argument
     1762  btintel_pcie_setup_rxq_bufs()    argument
     [all...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/

  en_arfs.c
       90  int rxq;                      member
      437  arfs_may_expire_flow()
            449: priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
      528  arfs_add_rule()
            597: dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
      614  arfs_modify_rule_rq()         argument
            620: dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
      659  arfs_alloc_rule()             argument
     [all...]
/linux/drivers/net/ethernet/fungible/funeth/

  funeth_trace.h
       75  TP_PROTO(const struct funeth_rxq *rxq, ...)
       81  TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec)
       90  __string(devname, rxq->netdev->name)
       94  __entry->qidx = rxq->qidx;
       95  __entry->cq_head = rxq->cq_head;
/linux/drivers/net/ethernet/freescale/

  fec_main.c
      469  fec_enet_create_page_pool()      argument
            484: rxq->page_pool = page_pool_create(&pp_params);
            491: err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
      990  fec_enet_bd_init()               local
     1080  fec_enet_enable_ring()           local
     1634  fec_enet_update_cbd()            argument
     1654  fec_enet_run_xdp()               argument
     1742  fec_enet_rx_queue()              local
     2996  fec_enet_get_xdp_stats()         local
     3013  fec_enet_page_pool_stats()       local
     3087  fec_enet_clear_ethtool_stats()   local
     3319  fec_enet_free_buffers()          local
     3436  fec_enet_alloc_rxq_buffers()     local
     4105  fec_enet_init()                  local
     [all...]
/linux/drivers/net/ethernet/chelsio/cxgb4vf/

  sge.c
     1564  do_gro()                      argument   (kernel-doc 1557: "@rxq: ingress RX Ethernet Queue")
           1573: skb = napi_get_frags(&rxq->rspq.napi);
           1585: skb_record_rx_queue(skb, rxq->rspq.idx);
           1593: ret = napi_gro_frags(&rxq->rspq.napi);
     1618  t4vf_ethrx_handler()          local
     1756  process_responses()           local
     2094  sge_rx_timer_cb()             local
     2564  t4vf_free_sge_resources()     local
     [all...]