Lines matching full:rx — identifier search over the GQI receive path of the gve Ethernet driver. Each entry below is one matched source line: the file line number, the code on that line, and the enclosing function; "argument" and "local" note how rx is bound on that line.
27 struct gve_rx_ring *rx, in gve_rx_unfill_pages() argument
30 u32 slots = rx->mask + 1; in gve_rx_unfill_pages()
33 if (!rx->data.page_info) in gve_rx_unfill_pages()
36 if (rx->data.raw_addressing) { in gve_rx_unfill_pages()
38 gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], in gve_rx_unfill_pages()
39 &rx->data.data_ring[i]); in gve_rx_unfill_pages()
42 page_ref_sub(rx->data.page_info[i].page, in gve_rx_unfill_pages()
43 rx->data.page_info[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
45 for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { in gve_rx_unfill_pages()
46 page_ref_sub(rx->qpl_copy_pool[i].page, in gve_rx_unfill_pages()
47 rx->qpl_copy_pool[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
48 put_page(rx->qpl_copy_pool[i].page); in gve_rx_unfill_pages()
51 kvfree(rx->data.page_info); in gve_rx_unfill_pages()
52 rx->data.page_info = NULL; in gve_rx_unfill_pages()
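The matches above show only the release calls; below is a minimal sketch of how they plausibly fit together (loop bounds and the raw-addressing branch are inferred from the mask fields, and the cfg argument visible at the call sites is omitted). It is not the verbatim driver source.

/* Sketch: drop every buffer reference the ring still holds. With raw
 * (DMA) addressing each slot owns its page and is unmapped/freed via
 * gve_rx_free_buffer(); in QPL mode the pages belong to the queue page
 * list, so only the elevated refcount bias taken at setup is returned,
 * and the copy-pool pages additionally get their final put_page().
 */
static void gve_rx_unfill_pages_sketch(struct gve_priv *priv,
                                       struct gve_rx_ring *rx)
{
        u32 slots = rx->mask + 1;       /* ring length is a power of two */
        int i;

        if (!rx->data.page_info)
                return;

        if (rx->data.raw_addressing) {
                for (i = 0; i < slots; i++)
                        gve_rx_free_buffer(&priv->pdev->dev,
                                           &rx->data.page_info[i],
                                           &rx->data.data_ring[i]);
        } else {
                for (i = 0; i < slots; i++)
                        page_ref_sub(rx->data.page_info[i].page,
                                     rx->data.page_info[i].pagecnt_bias - 1);

                for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
                        page_ref_sub(rx->qpl_copy_pool[i].page,
                                     rx->qpl_copy_pool[i].pagecnt_bias - 1);
                        put_page(rx->qpl_copy_pool[i].page);
                }
        }

        kvfree(rx->data.page_info);
        rx->data.page_info = NULL;
}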
64 static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx) in gve_rx_init_ring_state_gqi() argument
66 rx->desc.seqno = 1; in gve_rx_init_ring_state_gqi()
67 rx->cnt = 0; in gve_rx_init_ring_state_gqi()
68 gve_rx_ctx_clear(&rx->ctx); in gve_rx_init_ring_state_gqi()
73 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_reset_ring_gqi() local
78 if (rx->desc.desc_ring) { in gve_rx_reset_ring_gqi()
79 size = slots * sizeof(rx->desc.desc_ring[0]); in gve_rx_reset_ring_gqi()
80 memset(rx->desc.desc_ring, 0, size); in gve_rx_reset_ring_gqi()
84 if (rx->q_resources) in gve_rx_reset_ring_gqi()
85 memset(rx->q_resources, 0, sizeof(*rx->q_resources)); in gve_rx_reset_ring_gqi()
87 gve_rx_init_ring_state_gqi(rx); in gve_rx_reset_ring_gqi()
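A sketch of the reset path the two memset matches imply, assuming the ring length comes from the per-device descriptor count (field name assumed):

/* Sketch: a reset keeps all allocations and just wipes the descriptor
 * ring and the queue-resources block in place, then rewinds the software
 * state (expected seqno back to 1, consumer counter to 0, context cleared).
 */
static void gve_rx_reset_ring_gqi_sketch(struct gve_priv *priv, int idx)
{
        struct gve_rx_ring *rx = &priv->rx[idx];
        const u32 slots = priv->rx_desc_cnt;    /* assumption: ring length source */
        size_t size;

        if (rx->desc.desc_ring) {
                size = slots * sizeof(rx->desc.desc_ring[0]);
                memset(rx->desc.desc_ring, 0, size);
        }

        if (rx->q_resources)
                memset(rx->q_resources, 0, sizeof(*rx->q_resources));

        gve_rx_init_ring_state_gqi(rx);
}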
102 void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_free_ring_gqi() argument
106 u32 slots = rx->mask + 1; in gve_rx_free_ring_gqi()
107 int idx = rx->q_num; in gve_rx_free_ring_gqi()
111 if (rx->desc.desc_ring) { in gve_rx_free_ring_gqi()
113 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring_gqi()
114 rx->desc.desc_ring = NULL; in gve_rx_free_ring_gqi()
117 if (rx->q_resources) { in gve_rx_free_ring_gqi()
118 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring_gqi()
119 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring_gqi()
120 rx->q_resources = NULL; in gve_rx_free_ring_gqi()
123 gve_rx_unfill_pages(priv, rx, cfg); in gve_rx_free_ring_gqi()
125 if (rx->data.data_ring) { in gve_rx_free_ring_gqi()
126 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring_gqi()
127 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring_gqi()
128 rx->data.data_bus); in gve_rx_free_ring_gqi()
129 rx->data.data_ring = NULL; in gve_rx_free_ring_gqi()
132 kvfree(rx->qpl_copy_pool); in gve_rx_free_ring_gqi()
133 rx->qpl_copy_pool = NULL; in gve_rx_free_ring_gqi()
135 if (rx->data.qpl) { in gve_rx_free_ring_gqi()
137 gve_free_queue_page_list(priv, rx->data.qpl, qpl_id); in gve_rx_free_ring_gqi()
138 rx->data.qpl = NULL; in gve_rx_free_ring_gqi()
141 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring_gqi()
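Putting the free-path matches in order gives roughly the teardown sequence below; the descriptor-ring size, the cfg type name, and the QPL id derivation are assumptions filled in from the allocation-side matches.

static void gve_rx_free_ring_gqi_sketch(struct gve_priv *priv,
                                        struct gve_rx_ring *rx,
                                        struct gve_rx_alloc_rings_cfg *cfg)
{                                       /* cfg type name assumed */
        struct device *dev = &priv->pdev->dev;
        u32 slots = rx->mask + 1;
        int idx = rx->q_num;
        size_t bytes;
        u32 qpl_id;

        if (rx->desc.desc_ring) {
                /* assumption: one descriptor per slot */
                bytes = sizeof(rx->desc.desc_ring[0]) * slots;
                dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
                rx->desc.desc_ring = NULL;
        }

        if (rx->q_resources) {
                dma_free_coherent(dev, sizeof(*rx->q_resources),
                                  rx->q_resources, rx->q_resources_bus);
                rx->q_resources = NULL;
        }

        gve_rx_unfill_pages(priv, rx, cfg);

        if (rx->data.data_ring) {
                bytes = sizeof(*rx->data.data_ring) * slots;
                dma_free_coherent(dev, bytes, rx->data.data_ring,
                                  rx->data.data_bus);
                rx->data.data_ring = NULL;
        }

        kvfree(rx->qpl_copy_pool);
        rx->qpl_copy_pool = NULL;

        if (rx->data.qpl) {
                /* assumption: same qpl id mapping as the alloc path */
                qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
                gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
                rx->data.qpl = NULL;
        }

        netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}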
144 static void gve_setup_rx_buffer(struct gve_rx_ring *rx, in gve_setup_rx_buffer() argument
152 page_info->buf_size = rx->packet_buffer_size; in gve_setup_rx_buffer()
162 struct gve_rx_ring *rx) in gve_rx_alloc_buffer() argument
171 u64_stats_update_begin(&rx->statss); in gve_rx_alloc_buffer()
172 rx->rx_buf_alloc_fail++; in gve_rx_alloc_buffer()
173 u64_stats_update_end(&rx->statss); in gve_rx_alloc_buffer()
177 gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr); in gve_rx_alloc_buffer()
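A sketch of the single-buffer allocation path around those matches. gve_alloc_page_sketch() is a hypothetical stand-in for the driver's page-allocate-and-map helper, and the data-slot union type name is an assumption; the failure accounting and the setup call mirror the matched lines.

static int gve_rx_alloc_buffer_sketch(struct gve_priv *priv, struct device *dev,
                                      struct gve_rx_slot_page_info *page_info,
                                      union gve_rx_data_slot *data_slot,
                                      struct gve_rx_ring *rx)
{
        struct page *page;
        dma_addr_t dma;
        int err;

        /* hypothetical helper: allocate one page and DMA-map it for Rx */
        err = gve_alloc_page_sketch(priv, dev, &page, &dma);
        if (err) {
                u64_stats_update_begin(&rx->statss);
                rx->rx_buf_alloc_fail++;
                u64_stats_update_end(&rx->statss);
                return err;
        }

        gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
        return 0;
}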
181 static int gve_rx_prefill_pages(struct gve_rx_ring *rx, in gve_rx_prefill_pages() argument
184 struct gve_priv *priv = rx->gve; in gve_rx_prefill_pages()
190 /* Allocate one page per Rx queue slot. Each page is split into two in gve_rx_prefill_pages()
193 slots = rx->mask + 1; in gve_rx_prefill_pages()
195 rx->data.page_info = kvzalloc(slots * in gve_rx_prefill_pages()
196 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_rx_prefill_pages()
197 if (!rx->data.page_info) in gve_rx_prefill_pages()
201 if (!rx->data.raw_addressing) { in gve_rx_prefill_pages()
202 struct page *page = rx->data.qpl->pages[i]; in gve_rx_prefill_pages()
205 gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr, in gve_rx_prefill_pages()
207 &rx->data.data_ring[i].qpl_offset); in gve_rx_prefill_pages()
211 &rx->data.page_info[i], in gve_rx_prefill_pages()
212 &rx->data.data_ring[i], rx); in gve_rx_prefill_pages()
217 if (!rx->data.raw_addressing) { in gve_rx_prefill_pages()
218 for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) { in gve_rx_prefill_pages()
226 rx->qpl_copy_pool[j].page = page; in gve_rx_prefill_pages()
227 rx->qpl_copy_pool[j].page_offset = 0; in gve_rx_prefill_pages()
228 rx->qpl_copy_pool[j].page_address = page_address(page); in gve_rx_prefill_pages()
229 rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size; in gve_rx_prefill_pages()
233 rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX; in gve_rx_prefill_pages()
242 page_ref_sub(rx->qpl_copy_pool[j].page, in gve_rx_prefill_pages()
243 rx->qpl_copy_pool[j].pagecnt_bias - 1); in gve_rx_prefill_pages()
244 put_page(rx->qpl_copy_pool[j].page); in gve_rx_prefill_pages()
251 page_ref_sub(rx->data.page_info[i].page, in gve_rx_prefill_pages()
252 rx->data.page_info[i].pagecnt_bias - 1); in gve_rx_prefill_pages()
259 &rx->data.page_info[i], in gve_rx_prefill_pages()
260 &rx->data.data_ring[i]); in gve_rx_prefill_pages()
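The prefill matches hide the loop structure and the unwind paths; a condensed sketch follows, with the QPL offset computation and the copy-pool page allocation filled in as assumptions (the cfg argument of the real function is dropped here).

static int gve_rx_prefill_pages_sketch(struct gve_rx_ring *rx)
{
        struct gve_priv *priv = rx->gve;
        u32 slots = rx->mask + 1;
        int err, i, j;

        rx->data.page_info = kvzalloc(slots * sizeof(*rx->data.page_info),
                                      GFP_KERNEL);
        if (!rx->data.page_info)
                return -ENOMEM;

        for (i = 0; i < slots; i++) {
                if (!rx->data.raw_addressing) {
                        /* QPL mode: the buffer lives inside the queue page
                         * list; the slot records an offset (layout assumed).
                         */
                        struct page *page = rx->data.qpl->pages[i];
                        dma_addr_t addr = i * PAGE_SIZE;

                        gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
                                            page,
                                            &rx->data.data_ring[i].qpl_offset);
                        continue;
                }
                /* raw addressing: allocate and DMA-map a page per slot */
                err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
                                          &rx->data.page_info[i],
                                          &rx->data.data_ring[i], rx);
                if (err)
                        goto unwind_rda;
        }

        if (!rx->data.raw_addressing) {
                /* copy pool: spare pages used when a QPL buffer has to be
                 * copied out; the INT_MAX bias pre-pays page references.
                 */
                for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        if (!page) {
                                err = -ENOMEM;
                                goto unwind_copy_pool;
                        }
                        rx->qpl_copy_pool[j].page = page;
                        rx->qpl_copy_pool[j].page_offset = 0;
                        rx->qpl_copy_pool[j].page_address = page_address(page);
                        rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
                        rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
                        page_ref_add(page, INT_MAX - 1);
                }
        }
        return slots;

unwind_copy_pool:
        while (j--) {
                page_ref_sub(rx->qpl_copy_pool[j].page,
                             rx->qpl_copy_pool[j].pagecnt_bias - 1);
                put_page(rx->qpl_copy_pool[j].page);
        }
        /* QPL slot buffers only carried a refcount bias; drop it */
        for (i = 0; i < slots; i++)
                page_ref_sub(rx->data.page_info[i].page,
                             rx->data.page_info[i].pagecnt_bias - 1);
        kvfree(rx->data.page_info);
        rx->data.page_info = NULL;
        return err;

unwind_rda:
        while (i--)
                gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
                                   &rx->data.data_ring[i]);
        kvfree(rx->data.page_info);
        rx->data.page_info = NULL;
        return err;
}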
274 struct gve_rx_ring *rx, in gve_rx_alloc_ring_gqi() argument
285 netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); in gve_rx_alloc_ring_gqi()
287 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring_gqi()
289 rx->gve = priv; in gve_rx_alloc_ring_gqi()
290 rx->q_num = idx; in gve_rx_alloc_ring_gqi()
291 rx->packet_buffer_size = cfg->packet_buffer_size; in gve_rx_alloc_ring_gqi()
293 rx->mask = slots - 1; in gve_rx_alloc_ring_gqi()
294 rx->data.raw_addressing = cfg->raw_addressing; in gve_rx_alloc_ring_gqi()
296 /* alloc rx data ring */ in gve_rx_alloc_ring_gqi()
297 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring_gqi()
298 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring_gqi()
299 &rx->data.data_bus, in gve_rx_alloc_ring_gqi()
301 if (!rx->data.data_ring) in gve_rx_alloc_ring_gqi()
304 rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1; in gve_rx_alloc_ring_gqi()
305 rx->qpl_copy_pool_head = 0; in gve_rx_alloc_ring_gqi()
306 rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1, in gve_rx_alloc_ring_gqi()
307 sizeof(rx->qpl_copy_pool[0]), in gve_rx_alloc_ring_gqi()
310 if (!rx->qpl_copy_pool) { in gve_rx_alloc_ring_gqi()
315 if (!rx->data.raw_addressing) { in gve_rx_alloc_ring_gqi()
316 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); in gve_rx_alloc_ring_gqi()
319 rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_rx_alloc_ring_gqi()
321 if (!rx->data.qpl) { in gve_rx_alloc_ring_gqi()
327 filled_pages = gve_rx_prefill_pages(rx, cfg); in gve_rx_alloc_ring_gqi()
332 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring_gqi()
337 rx->q_resources = in gve_rx_alloc_ring_gqi()
339 sizeof(*rx->q_resources), in gve_rx_alloc_ring_gqi()
340 &rx->q_resources_bus, in gve_rx_alloc_ring_gqi()
342 if (!rx->q_resources) { in gve_rx_alloc_ring_gqi()
346 netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx, in gve_rx_alloc_ring_gqi()
347 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring_gqi()
349 /* alloc rx desc ring */ in gve_rx_alloc_ring_gqi()
351 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring_gqi()
353 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring_gqi()
357 rx->db_threshold = slots / 2; in gve_rx_alloc_ring_gqi()
358 gve_rx_init_ring_state_gqi(rx); in gve_rx_alloc_ring_gqi()
360 gve_rx_ctx_clear(&rx->ctx); in gve_rx_alloc_ring_gqi()
365 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring_gqi()
366 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring_gqi()
367 rx->q_resources = NULL; in gve_rx_alloc_ring_gqi()
369 gve_rx_unfill_pages(priv, rx, cfg); in gve_rx_alloc_ring_gqi()
371 if (!rx->data.raw_addressing) { in gve_rx_alloc_ring_gqi()
372 gve_free_queue_page_list(priv, rx->data.qpl, qpl_id); in gve_rx_alloc_ring_gqi()
373 rx->data.qpl = NULL; in gve_rx_alloc_ring_gqi()
376 kvfree(rx->qpl_copy_pool); in gve_rx_alloc_ring_gqi()
377 rx->qpl_copy_pool = NULL; in gve_rx_alloc_ring_gqi()
379 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring_gqi()
380 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring_gqi()
381 rx->data.data_ring = NULL; in gve_rx_alloc_ring_gqi()
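Everything in this allocator is sized as a power of two so indices can wrap with a mask instead of a modulo: slots gives rx->mask = slots - 1, the copy pool gets twice as many entries, and the doorbell threshold is half the ring. A small standalone demonstration of that arithmetic (example values only, not driver code):

#include <stdio.h>

int main(void)
{
        unsigned int slots = 1024;                   /* example ring size (power of two) */
        unsigned int mask = slots - 1;               /* rx->mask */
        unsigned int copy_pool_mask = slots * 2 - 1; /* rx->qpl_copy_pool_mask */
        unsigned int db_threshold = slots / 2;       /* rx->db_threshold */

        /* cnt and fill_cnt are free-running counters; masking wraps them */
        unsigned int cnt = 70000, fill_cnt = 70900;

        printf("consumer slot  = %u\n", cnt & mask);
        printf("producer slot  = %u\n", fill_cnt & mask);
        printf("outstanding    = %u (doorbell threshold %u)\n",
               fill_cnt - cnt, db_threshold);
        printf("copy pool slot = %u\n", 123456u & copy_pool_mask);
        return 0;
}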
389 struct gve_rx_ring *rx; in gve_rx_alloc_rings_gqi() local
393 rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring), in gve_rx_alloc_rings_gqi()
395 if (!rx) in gve_rx_alloc_rings_gqi()
399 err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i); in gve_rx_alloc_rings_gqi()
402 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings_gqi()
408 cfg->rx = rx; in gve_rx_alloc_rings_gqi()
413 gve_rx_free_ring_gqi(priv, &rx[j], cfg); in gve_rx_alloc_rings_gqi()
414 kvfree(rx); in gve_rx_alloc_rings_gqi()
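A sketch of the all-rings allocation loop those matches come from; the loop bound (number of currently configured queues) and the partial-cleanup path are filled in as assumptions.

static int gve_rx_alloc_rings_gqi_sketch(struct gve_priv *priv,
                                         struct gve_rx_alloc_rings_cfg *cfg)
{
        struct gve_rx_ring *rx;
        int err = 0;
        int i, j;

        rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
                      GFP_KERNEL);
        if (!rx)
                return -ENOMEM;

        /* assumption: only the currently configured queues are allocated */
        for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
                err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
                if (err) {
                        netif_err(priv, drv, priv->dev,
                                  "Failed to alloc rx ring=%d: err=%d\n",
                                  i, err);
                        goto cleanup;
                }
        }

        cfg->rx = rx;
        return 0;

cleanup:
        for (j = 0; j < i; j++)
                gve_rx_free_ring_gqi(priv, &rx[j], cfg);
        kvfree(rx);
        return err;
}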
421 struct gve_rx_ring *rx = cfg->rx; in gve_rx_free_rings_gqi() local
424 if (!rx) in gve_rx_free_rings_gqi()
428 gve_rx_free_ring_gqi(priv, &rx[i], cfg); in gve_rx_free_rings_gqi()
430 kvfree(rx); in gve_rx_free_rings_gqi()
431 cfg->rx = NULL; in gve_rx_free_rings_gqi()
434 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_write_doorbell() argument
436 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
438 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
538 static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, in gve_rx_copy_to_pool() argument
542 u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask; in gve_rx_copy_to_pool()
545 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_copy_to_pool()
550 copy_page_info = &rx->qpl_copy_pool[pool_idx]; in gve_rx_copy_to_pool()
555 gve_schedule_reset(rx->gve); in gve_rx_copy_to_pool()
569 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
585 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
586 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
587 rx->rx_frag_alloc_cnt++; in gve_rx_copy_to_pool()
588 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
610 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
611 prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page); in gve_rx_copy_to_pool()
616 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
617 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
618 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
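A simplified sketch of the copy-pool fast path: the next pool buffer is selected with head & mask, the fragment is copied into it, the head advances, and the next pool page is prefetched. The fallback taken when the pool entry is still in use (the earlier stats block above, which also bumps rx_frag_alloc_cnt) and the reset-on-inconsistency branch are omitted; gve_rx_add_frags() is assumed to be the helper that attaches a page fragment to an skb (signature guessed).

static struct sk_buff *gve_rx_copy_to_pool_sketch(struct gve_rx_ring *rx,
                                                  struct gve_rx_slot_page_info *page_info,
                                                  u16 len,
                                                  struct napi_struct *napi)
{
        u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
        struct gve_rx_slot_page_info *copy_page_info;
        struct gve_rx_ctx *ctx = &rx->ctx;
        struct sk_buff *skb;

        copy_page_info = &rx->qpl_copy_pool[pool_idx];

        /* copy the fragment out of the QPL page into the pool page */
        memcpy(copy_page_info->page_address + copy_page_info->page_offset,
               page_info->page_address + page_info->page_offset, len);

        /* assumed helper: build/extend the skb from the pool fragment */
        skb = gve_rx_add_frags(napi, copy_page_info, copy_page_info->buf_size,
                               len, ctx);
        if (unlikely(!skb))
                return NULL;

        rx->qpl_copy_pool_head++;
        prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head &
                                   rx->qpl_copy_pool_mask].page);

        u64_stats_update_begin(&rx->statss);
        rx->rx_frag_copy_cnt++;
        u64_stats_update_end(&rx->statss);

        return skb;
}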
625 struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, in gve_rx_qpl() argument
629 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_qpl()
647 skb = gve_rx_copy_to_pool(rx, page_info, len, napi); in gve_rx_qpl()
652 static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_skb() argument
658 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_skb()
665 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
666 rx->rx_copied_pkt++; in gve_rx_skb()
667 rx->rx_frag_copy_cnt++; in gve_rx_skb()
668 rx->rx_copybreak_pkt++; in gve_rx_skb()
669 u64_stats_update_end(&rx->statss); in gve_rx_skb()
680 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
681 rx->rx_frag_flip_cnt++; in gve_rx_skb()
682 u64_stats_update_end(&rx->statss); in gve_rx_skb()
685 if (rx->data.raw_addressing) { in gve_rx_skb()
691 skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, in gve_rx_skb()
699 struct gve_rx_ring *rx, in gve_xsk_pool_redirect() argument
706 if (rx->xsk_pool->frame_len < len) in gve_xsk_pool_redirect()
708 xdp = xsk_buff_alloc(rx->xsk_pool); in gve_xsk_pool_redirect()
710 u64_stats_update_begin(&rx->statss); in gve_xsk_pool_redirect()
711 rx->xdp_alloc_fails++; in gve_xsk_pool_redirect()
712 u64_stats_update_end(&rx->statss); in gve_xsk_pool_redirect()
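A sketch of the AF_XDP redirect path around those matches: frames larger than the pool's frame size are rejected, otherwise a buffer is taken from the XSK pool, the payload is copied in, and the buffer is redirected. The copy and the xdp_do_redirect()/xsk_buff_free() error handling are filled in as assumptions.

static int gve_xsk_pool_redirect_sketch(struct net_device *dev,
                                        struct gve_rx_ring *rx,
                                        void *data, int len,
                                        struct bpf_prog *xdp_prog)
{
        struct xdp_buff *xdp;
        int err;

        if (rx->xsk_pool->frame_len < len)
                return -E2BIG;

        xdp = xsk_buff_alloc(rx->xsk_pool);
        if (!xdp) {
                u64_stats_update_begin(&rx->statss);
                rx->xdp_alloc_fails++;
                u64_stats_update_end(&rx->statss);
                return -ENOMEM;
        }

        /* copy the received payload into the user-space (XSK) buffer */
        xdp->data_end = xdp->data + len;
        memcpy(xdp->data, data, len);

        err = xdp_do_redirect(dev, xdp, xdp_prog);
        if (err)
                xsk_buff_free(xdp);
        return err;
}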
723 static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx, in gve_xdp_redirect() argument
732 if (rx->xsk_pool) in gve_xdp_redirect()
733 return gve_xsk_pool_redirect(dev, rx, orig->data, in gve_xdp_redirect()
738 frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC); in gve_xdp_redirect()
740 u64_stats_update_begin(&rx->statss); in gve_xdp_redirect()
741 rx->xdp_alloc_fails++; in gve_xdp_redirect()
742 u64_stats_update_end(&rx->statss); in gve_xdp_redirect()
745 xdp_init_buff(&new, total_len, &rx->xdp_rxq); in gve_xdp_redirect()
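For the generic XDP_REDIRECT path the original buffer cannot leave the driver (it belongs to the ring), so the frame is copied into a page-frag allocation first; a sketch with the headroom/tailroom sizing filled in as assumptions.

static int gve_xdp_redirect_sketch(struct net_device *dev,
                                   struct gve_rx_ring *rx,
                                   struct xdp_buff *orig,
                                   struct bpf_prog *xdp_prog)
{
        int len = orig->data_end - orig->data;
        int headroom = XDP_PACKET_HEADROOM;     /* assumption */
        struct xdp_buff new;
        int total_len;
        void *frame;
        int err;

        if (rx->xsk_pool)
                return gve_xsk_pool_redirect(dev, rx, orig->data, len,
                                             xdp_prog);

        /* assumed sizing: headroom + aligned data + shared-info tailroom */
        total_len = headroom + SKB_DATA_ALIGN(len) +
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
        if (!frame) {
                u64_stats_update_begin(&rx->statss);
                rx->xdp_alloc_fails++;
                u64_stats_update_end(&rx->statss);
                return -ENOMEM;
        }

        xdp_init_buff(&new, total_len, &rx->xdp_rxq);
        xdp_prepare_buff(&new, frame, headroom, len, false);
        memcpy(new.data, orig->data, len);

        err = xdp_do_redirect(dev, &new, xdp_prog);
        if (err)
                page_frag_free(frame);
        return err;
}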
756 static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_xdp_done() argument
770 tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num); in gve_xdp_done()
778 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
779 rx->xdp_tx_errors++; in gve_xdp_done()
780 u64_stats_update_end(&rx->statss); in gve_xdp_done()
784 err = gve_xdp_redirect(priv->dev, rx, xdp, xprog); in gve_xdp_done()
787 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
788 rx->xdp_redirect_errors++; in gve_xdp_done()
789 u64_stats_update_end(&rx->statss); in gve_xdp_done()
793 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
795 rx->xdp_actions[xdp_act]++; in gve_xdp_done()
796 u64_stats_update_end(&rx->statss); in gve_xdp_done()
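A sketch of the XDP verdict handling implied by the matches: XDP_TX forwards to the paired XDP Tx queue, XDP_REDIRECT goes through gve_xdp_redirect(), errors feed the per-ring error counters, and the action counter is bumped at the end. gve_xdp_xmit_one_sketch() is a hypothetical stand-in for the driver's XDP Tx enqueue helper, and the GVE_XDP_ACTIONS bound is assumed.

static void gve_xdp_done_sketch(struct gve_priv *priv, struct gve_rx_ring *rx,
                                struct xdp_buff *xdp, struct bpf_prog *xprog,
                                int xdp_act)
{
        int err;

        switch (xdp_act) {
        case XDP_ABORTED:
        case XDP_DROP:
        default:
                break;
        case XDP_TX:
                /* hypothetical helper name; enqueue on the paired Tx queue */
                err = gve_xdp_xmit_one_sketch(priv,
                                gve_xdp_tx_queue_id(priv, rx->q_num), xdp);
                if (unlikely(err)) {
                        u64_stats_update_begin(&rx->statss);
                        rx->xdp_tx_errors++;
                        u64_stats_update_end(&rx->statss);
                }
                break;
        case XDP_REDIRECT:
                err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);
                if (unlikely(err)) {
                        u64_stats_update_begin(&rx->statss);
                        rx->xdp_redirect_errors++;
                        u64_stats_update_end(&rx->statss);
                }
                break;
        }

        u64_stats_update_begin(&rx->statss);
        if (xdp_act < GVE_XDP_ACTIONS)  /* array bound, name assumed */
                rx->xdp_actions[xdp_act]++;
        u64_stats_update_end(&rx->statss);
}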
800 static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, in gve_rx() argument
807 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
809 struct gve_priv *priv = rx->gve; in gve_rx()
817 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
832 if (unlikely(frag_size > rx->packet_buffer_size)) { in gve_rx()
834 frag_size, rx->packet_buffer_size); in gve_rx()
837 gve_schedule_reset(rx->gve); in gve_rx()
842 page_info = &rx->data.page_info[(idx + 2) & rx->mask]; in gve_rx()
848 page_info = &rx->data.page_info[idx]; in gve_rx()
849 data_slot = &rx->data.data_ring[idx]; in gve_rx()
850 page_bus = (rx->data.raw_addressing) ? in gve_rx()
852 rx->data.qpl->page_buses[idx]; in gve_rx()
864 xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq); in gve_rx()
871 gve_xdp_done(priv, rx, &xdp, xprog, xdp_act); in gve_rx()
879 u64_stats_update_begin(&rx->statss); in gve_rx()
880 rx->xdp_actions[XDP_PASS]++; in gve_rx()
881 u64_stats_update_end(&rx->statss); in gve_rx()
884 skb = gve_rx_skb(priv, rx, page_info, napi, len, in gve_rx()
887 u64_stats_update_begin(&rx->statss); in gve_rx()
888 rx->rx_skb_alloc_fail++; in gve_rx()
889 u64_stats_update_end(&rx->statss); in gve_rx()
915 skb_record_rx_queue(skb, rx->q_num); in gve_rx()
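The gve_rx() matches show the XDP hook only in pieces; here it is sketched as a standalone helper, with the headroom left as a parameter since the exact buffer layout is not visible in the matches.

/* Returns true when the packet was consumed by XDP (anything but PASS). */
static bool gve_rx_run_xdp_sketch(struct gve_priv *priv, struct gve_rx_ring *rx,
                                  struct gve_rx_slot_page_info *page_info,
                                  struct bpf_prog *xprog, u16 len,
                                  unsigned int headroom)
{
        struct xdp_buff xdp;
        u32 xdp_act;
        void *va = page_info->page_address + page_info->page_offset;

        xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
        xdp_prepare_buff(&xdp, va, headroom, len, false);

        xdp_act = bpf_prog_run_xdp(xprog, &xdp);
        if (xdp_act != XDP_PASS) {
                gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
                return true;
        }

        u64_stats_update_begin(&rx->statss);
        rx->xdp_actions[XDP_PASS]++;
        u64_stats_update_end(&rx->statss);
        return false;
}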
937 bool gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
943 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
944 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
948 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
951 static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_refill_buffers() argument
953 int refill_target = rx->mask + 1; in gve_rx_refill_buffers()
954 u32 fill_cnt = rx->fill_cnt; in gve_rx_refill_buffers()
956 while (fill_cnt - rx->cnt < refill_target) { in gve_rx_refill_buffers()
958 u32 idx = fill_cnt & rx->mask; in gve_rx_refill_buffers()
960 page_info = &rx->data.page_info[idx]; in gve_rx_refill_buffers()
966 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
981 if (!rx->data.raw_addressing) in gve_rx_refill_buffers()
988 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
993 data_slot, rx)) { in gve_rx_refill_buffers()
1000 rx->fill_cnt = fill_cnt; in gve_rx_refill_buffers()
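A simplified sketch of the refill loop: buffers are posted until refill_target slots are outstanding. QPL buffers can always be re-posted; for raw addressing the page may still be held by the stack, which is condensed here into a hypothetical gve_rx_buffer_recyclable() check (the driver's real recycling logic is more involved).

static bool gve_rx_refill_buffers_sketch(struct gve_priv *priv,
                                         struct gve_rx_ring *rx)
{
        int refill_target = rx->mask + 1;
        u32 fill_cnt = rx->fill_cnt;

        while (fill_cnt - rx->cnt < refill_target) {
                u32 idx = fill_cnt & rx->mask;
                struct gve_rx_slot_page_info *page_info =
                                        &rx->data.page_info[idx];

                if (rx->data.raw_addressing &&
                    !gve_rx_buffer_recyclable(page_info)) { /* hypothetical */
                        union gve_rx_data_slot *data_slot =
                                                &rx->data.data_ring[idx];
                        struct device *dev = &priv->pdev->dev;

                        /* release the old page and try to post a new one */
                        gve_rx_free_buffer(dev, page_info, data_slot);
                        if (gve_rx_alloc_buffer(priv, dev, page_info,
                                                data_slot, rx)) {
                                rx->fill_cnt = fill_cnt;
                                return false;   /* caller reschedules */
                        }
                }
                fill_cnt++;
        }
        rx->fill_cnt = fill_cnt;
        return true;
}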
1004 static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget, in gve_clean_rx_done() argument
1007 u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; in gve_clean_rx_done()
1008 u64 xdp_txs = rx->xdp_actions[XDP_TX]; in gve_clean_rx_done()
1009 struct gve_rx_ctx *ctx = &rx->ctx; in gve_clean_rx_done()
1010 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
1013 u32 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
1016 struct gve_rx_desc *desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
1019 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
1021 next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask]; in gve_clean_rx_done()
1024 gve_rx(rx, feat, desc, idx, &cnts); in gve_clean_rx_done()
1026 rx->cnt++; in gve_clean_rx_done()
1027 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
1028 desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
1029 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_clean_rx_done()
1035 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_clean_rx_done()
1038 gve_rx_ctx_clear(&rx->ctx); in gve_clean_rx_done()
1040 GVE_SEQNO(desc->flags_seq), rx->desc.seqno); in gve_clean_rx_done()
1041 gve_schedule_reset(rx->gve); in gve_clean_rx_done()
1044 if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) in gve_clean_rx_done()
1048 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
1049 rx->rpackets += cnts.ok_pkt_cnt; in gve_clean_rx_done()
1050 rx->rbytes += cnts.ok_pkt_bytes; in gve_clean_rx_done()
1051 rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt; in gve_clean_rx_done()
1052 rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt; in gve_clean_rx_done()
1053 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
1056 if (xdp_txs != rx->xdp_actions[XDP_TX]) in gve_clean_rx_done()
1057 gve_xdp_tx_flush(priv, rx->q_num); in gve_clean_rx_done()
1059 if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) in gve_clean_rx_done()
1063 if (!rx->data.raw_addressing) { in gve_clean_rx_done()
1065 rx->fill_cnt += work_done; in gve_clean_rx_done()
1066 } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
1070 if (!gve_rx_refill_buffers(priv, rx)) in gve_clean_rx_done()
1076 if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
1077 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
1082 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
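Finally, a condensed sketch of the cleaning loop that ties the matches above together. Multi-fragment context handling, the continuation/error counters, and the reset-on-bad-seqno branch are trimmed; the cnts aggregate type name and the budget return in the partial-refill case are assumptions.

static int gve_clean_rx_done_sketch(struct gve_rx_ring *rx, int budget,
                                    netdev_features_t feat)
{
        u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
        u64 xdp_txs = rx->xdp_actions[XDP_TX];
        struct gve_priv *priv = rx->gve;
        struct gve_rx_cnts cnts = {0};  /* type name assumed */
        u32 idx = rx->cnt & rx->mask;
        struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
        struct gve_rx_desc *next_desc;
        int work_done = 0;

        /* a descriptor is ours when its embedded seqno matches the expected one */
        while (GVE_SEQNO(desc->flags_seq) == rx->desc.seqno &&
               work_done < budget) {
                next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
                prefetch(next_desc);

                gve_rx(rx, feat, desc, idx, &cnts);

                rx->cnt++;
                idx = rx->cnt & rx->mask;
                desc = &rx->desc.desc_ring[idx];
                rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
                work_done++;
        }

        /* nothing cleaned and plenty of buffers still posted: nothing to do */
        if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
                return 0;

        if (work_done) {
                u64_stats_update_begin(&rx->statss);
                rx->rpackets += cnts.ok_pkt_cnt;
                rx->rbytes += cnts.ok_pkt_bytes;
                u64_stats_update_end(&rx->statss);
        }

        if (xdp_txs != rx->xdp_actions[XDP_TX])
                gve_xdp_tx_flush(priv, rx->q_num);
        if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
                xdp_do_flush();

        if (!rx->data.raw_addressing) {
                /* QPL buffers are recycled in place as they are processed */
                rx->fill_cnt += work_done;
        } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
                if (!gve_rx_refill_buffers(priv, rx))
                        return 0;
                /* still short of buffers: ring the doorbell and ask to be
                 * polled again (assumed: returning budget keeps NAPI going)
                 */
                if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
                        gve_rx_write_doorbell(priv, rx);
                        return budget;
                }
        }

        gve_rx_write_doorbell(priv, rx);
        return work_done;
}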
1088 struct gve_rx_ring *rx = block->rx; in gve_rx_poll() local
1095 work_done = gve_clean_rx_done(rx, budget, feat); in gve_rx_poll()