Lines matching full:rx — occurrences of the identifier rx in the gve Ethernet driver's DQO receive path (gve_rx_dqo.c). Each entry shows the original source line number, the matching line, and the enclosing function; trailing "argument" and "local" notes mark lines where rx is declared as a function parameter or a local variable.

19 static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)  in gve_rx_free_hdr_bufs()  argument
22 int buf_count = rx->dqo.bufq.mask + 1; in gve_rx_free_hdr_bufs()
24 if (rx->dqo.hdr_bufs.data) { in gve_rx_free_hdr_bufs()
26 rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr); in gve_rx_free_hdr_bufs()
27 rx->dqo.hdr_bufs.data = NULL; in gve_rx_free_hdr_bufs()
31 static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx, in gve_rx_init_ring_state_dqo() argument
38 rx->dqo.bufq.mask = buffer_queue_slots - 1; in gve_rx_init_ring_state_dqo()
39 rx->dqo.bufq.head = 0; in gve_rx_init_ring_state_dqo()
40 rx->dqo.bufq.tail = 0; in gve_rx_init_ring_state_dqo()
43 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_init_ring_state_dqo()
44 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_init_ring_state_dqo()
45 rx->dqo.complq.cur_gen_bit = 0; in gve_rx_init_ring_state_dqo()
46 rx->dqo.complq.head = 0; in gve_rx_init_ring_state_dqo()
48 /* Set RX SKB context */ in gve_rx_init_ring_state_dqo()
49 rx->ctx.skb_head = NULL; in gve_rx_init_ring_state_dqo()
50 rx->ctx.skb_tail = NULL; in gve_rx_init_ring_state_dqo()
53 if (rx->dqo.buf_states) { in gve_rx_init_ring_state_dqo()
54 for (i = 0; i < rx->dqo.num_buf_states - 1; i++) in gve_rx_init_ring_state_dqo()
55 rx->dqo.buf_states[i].next = i + 1; in gve_rx_init_ring_state_dqo()
56 rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; in gve_rx_init_ring_state_dqo()
59 rx->dqo.free_buf_states = 0; in gve_rx_init_ring_state_dqo()
60 rx->dqo.recycled_buf_states.head = -1; in gve_rx_init_ring_state_dqo()
61 rx->dqo.recycled_buf_states.tail = -1; in gve_rx_init_ring_state_dqo()
62 rx->dqo.used_buf_states.head = -1; in gve_rx_init_ring_state_dqo()
63 rx->dqo.used_buf_states.tail = -1; in gve_rx_init_ring_state_dqo()
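The init lines above (source lines 38-63) set up two power-of-two rings and an index-linked free list of buffer states. Below is a minimal userspace model of that pattern, with hypothetical field names standing in for struct gve_rx_ring; it is a sketch of the idea, not driver code:

#include <stdint.h>
#include <stdio.h>

#define NUM_BUF_STATES 8

struct rx_model {
        uint16_t bufq_mask, bufq_head, bufq_tail;   /* buffer queue        */
        uint16_t complq_mask, complq_head;          /* completion queue    */
        uint8_t  complq_gen_bit;
        int16_t  free_buf_states;                   /* head of free list   */
        int16_t  next[NUM_BUF_STATES];              /* buf_states[i].next  */
};

/* Same shape as gve_rx_init_ring_state_dqo(): queue sizes are powers of two,
 * so mask = slots - 1 wraps indices cheaply, and the buffer states form a
 * singly linked free list chained by index and terminated with -1. */
static void init_ring_state(struct rx_model *rx, int bufq_slots, int complq_slots)
{
        int i;

        rx->bufq_mask = bufq_slots - 1;
        rx->bufq_head = rx->bufq_tail = 0;

        rx->complq_mask = complq_slots - 1;
        rx->complq_head = 0;
        rx->complq_gen_bit = 0;

        for (i = 0; i < NUM_BUF_STATES - 1; i++)
                rx->next[i] = i + 1;
        rx->next[NUM_BUF_STATES - 1] = -1;          /* -1 ends the list    */
        rx->free_buf_states = 0;
}

int main(void)
{
        struct rx_model rx;

        init_ring_state(&rx, 4, 4);
        printf("bufq_mask=%u, free list head=%d\n",
               (unsigned)rx.bufq_mask, rx.free_buf_states);
        return 0;
}
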
68 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_reset_ring_dqo() local
76 if (rx->dqo.bufq.desc_ring) { in gve_rx_reset_ring_dqo()
77 size = sizeof(rx->dqo.bufq.desc_ring[0]) * in gve_rx_reset_ring_dqo()
79 memset(rx->dqo.bufq.desc_ring, 0, size); in gve_rx_reset_ring_dqo()
83 if (rx->dqo.complq.desc_ring) { in gve_rx_reset_ring_dqo()
84 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_reset_ring_dqo()
86 memset(rx->dqo.complq.desc_ring, 0, size); in gve_rx_reset_ring_dqo()
90 if (rx->q_resources) in gve_rx_reset_ring_dqo()
91 memset(rx->q_resources, 0, sizeof(*rx->q_resources)); in gve_rx_reset_ring_dqo()
94 if (rx->dqo.buf_states) { in gve_rx_reset_ring_dqo()
95 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_reset_ring_dqo()
96 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_reset_ring_dqo()
98 if (rx->dqo.page_pool) in gve_rx_reset_ring_dqo()
99 gve_free_to_page_pool(rx, bs, false); in gve_rx_reset_ring_dqo()
105 gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, in gve_rx_reset_ring_dqo()
112 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_stop_ring_dqo() local
117 if (rx->dqo.page_pool) in gve_rx_stop_ring_dqo()
118 page_pool_disable_direct_recycling(rx->dqo.page_pool); in gve_rx_stop_ring_dqo()
124 void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_free_ring_dqo() argument
130 int idx = rx->q_num; in gve_rx_free_ring_dqo()
135 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
136 buffer_queue_slots = rx->dqo.bufq.mask + 1; in gve_rx_free_ring_dqo()
138 if (rx->q_resources) { in gve_rx_free_ring_dqo()
139 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_free_ring_dqo()
140 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring_dqo()
141 rx->q_resources = NULL; in gve_rx_free_ring_dqo()
144 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_free_ring_dqo()
145 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_free_ring_dqo()
147 if (rx->dqo.page_pool) in gve_rx_free_ring_dqo()
148 gve_free_to_page_pool(rx, bs, false); in gve_rx_free_ring_dqo()
153 if (rx->dqo.qpl) { in gve_rx_free_ring_dqo()
154 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); in gve_rx_free_ring_dqo()
155 gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id); in gve_rx_free_ring_dqo()
156 rx->dqo.qpl = NULL; in gve_rx_free_ring_dqo()
159 if (rx->dqo.bufq.desc_ring) { in gve_rx_free_ring_dqo()
160 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_free_ring_dqo()
161 dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, in gve_rx_free_ring_dqo()
162 rx->dqo.bufq.bus); in gve_rx_free_ring_dqo()
163 rx->dqo.bufq.desc_ring = NULL; in gve_rx_free_ring_dqo()
166 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
167 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
169 dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, in gve_rx_free_ring_dqo()
170 rx->dqo.complq.bus); in gve_rx_free_ring_dqo()
171 rx->dqo.complq.desc_ring = NULL; in gve_rx_free_ring_dqo()
174 kvfree(rx->dqo.buf_states); in gve_rx_free_ring_dqo()
175 rx->dqo.buf_states = NULL; in gve_rx_free_ring_dqo()
177 if (rx->dqo.page_pool) { in gve_rx_free_ring_dqo()
178 page_pool_destroy(rx->dqo.page_pool); in gve_rx_free_ring_dqo()
179 rx->dqo.page_pool = NULL; in gve_rx_free_ring_dqo()
182 gve_rx_free_hdr_bufs(priv, rx); in gve_rx_free_ring_dqo()
184 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring_dqo()
187 static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_rx_alloc_hdr_bufs() argument
192 rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count, in gve_rx_alloc_hdr_bufs()
193 &rx->dqo.hdr_bufs.addr, GFP_KERNEL); in gve_rx_alloc_hdr_bufs()
194 if (!rx->dqo.hdr_bufs.data) in gve_rx_alloc_hdr_bufs()
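gve_rx_alloc_hdr_bufs() makes one coherent allocation of header_buf_size * buf_count bytes, and the posting path later hands the device per-slot addresses computed as base + slot * header_buf_size (see source line 378 below). A plain-malloc sketch of that slicing, with calloc and byte offsets standing in for dma_alloc_coherent() and the bus address:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t header_buf_size = 128;   /* stand-in for priv->header_buf_size */
        size_t buf_count = 256;         /* one header buffer per bufq slot    */
        unsigned char *data = calloc(buf_count, header_buf_size);

        if (!data)
                return 1;

        /* The descriptor for slot i advertises base + i * header_buf_size,
         * so each packet's split-off headers land in their own fixed slice. */
        for (size_t i = 0; i < 3; i++)
                printf("slot %zu -> offset %zu\n", i, i * header_buf_size);

        free(data);
        return 0;
}
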
210 struct gve_rx_ring *rx, in gve_rx_alloc_ring_dqo() argument
222 netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); in gve_rx_alloc_ring_dqo()
224 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring_dqo()
225 rx->gve = priv; in gve_rx_alloc_ring_dqo()
226 rx->q_num = idx; in gve_rx_alloc_ring_dqo()
227 rx->packet_buffer_size = cfg->packet_buffer_size; in gve_rx_alloc_ring_dqo()
230 rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO; in gve_rx_alloc_ring_dqo()
231 rx->rx_headroom = XDP_PACKET_HEADROOM; in gve_rx_alloc_ring_dqo()
233 rx->packet_buffer_truesize = rx->packet_buffer_size; in gve_rx_alloc_ring_dqo()
234 rx->rx_headroom = 0; in gve_rx_alloc_ring_dqo()
237 rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots : in gve_rx_alloc_ring_dqo()
239 rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, in gve_rx_alloc_ring_dqo()
240 sizeof(rx->dqo.buf_states[0]), in gve_rx_alloc_ring_dqo()
242 if (!rx->dqo.buf_states) in gve_rx_alloc_ring_dqo()
247 if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots)) in gve_rx_alloc_ring_dqo()
250 /* Allocate RX completion queue */ in gve_rx_alloc_ring_dqo()
251 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_alloc_ring_dqo()
253 rx->dqo.complq.desc_ring = in gve_rx_alloc_ring_dqo()
254 dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
255 if (!rx->dqo.complq.desc_ring) in gve_rx_alloc_ring_dqo()
258 /* Allocate RX buffer queue */ in gve_rx_alloc_ring_dqo()
259 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_alloc_ring_dqo()
260 rx->dqo.bufq.desc_ring = in gve_rx_alloc_ring_dqo()
261 dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
262 if (!rx->dqo.bufq.desc_ring) in gve_rx_alloc_ring_dqo()
266 pool = gve_rx_create_page_pool(priv, rx, cfg->xdp); in gve_rx_alloc_ring_dqo()
270 rx->dqo.page_pool = pool; in gve_rx_alloc_ring_dqo()
272 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); in gve_rx_alloc_ring_dqo()
275 rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_rx_alloc_ring_dqo()
277 if (!rx->dqo.qpl) in gve_rx_alloc_ring_dqo()
279 rx->dqo.next_qpl_page_idx = 0; in gve_rx_alloc_ring_dqo()
282 rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring_dqo()
283 &rx->q_resources_bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
284 if (!rx->q_resources) in gve_rx_alloc_ring_dqo()
287 gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, in gve_rx_alloc_ring_dqo()
293 gve_rx_free_ring_dqo(priv, rx, cfg); in gve_rx_alloc_ring_dqo()
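gve_rx_alloc_ring_dqo() above allocates the buffer states, optional header buffers, both descriptor rings, an optional page pool or QPL, and the queue resources; any failure jumps to a single error label that calls gve_rx_free_ring_dqo(), which is why the free path earlier NULL-checks every member before releasing it. A compact sketch of that single-label unwind idiom, using hypothetical resources and plain calloc/free:

#include <stdlib.h>
#include <string.h>

struct ring {
        void *buf_states;
        void *complq_desc;
        void *bufq_desc;
};

/* Safe on partially built rings: free(NULL) is a no-op here, while the real
 * driver NULL-checks each member because dma_free_coherent() also needs the
 * matching size and bus address. */
static void ring_free(struct ring *r)
{
        free(r->bufq_desc);   r->bufq_desc = NULL;
        free(r->complq_desc); r->complq_desc = NULL;
        free(r->buf_states);  r->buf_states = NULL;
}

static int ring_alloc(struct ring *r)
{
        memset(r, 0, sizeof(*r));       /* the driver memsets *rx up front */

        r->buf_states = calloc(64, 16);
        if (!r->buf_states)
                goto err;
        r->complq_desc = calloc(128, 32);
        if (!r->complq_desc)
                goto err;
        r->bufq_desc = calloc(128, 16);
        if (!r->bufq_desc)
                goto err;
        return 0;

err:
        ring_free(r);                   /* one label, one cleanup path */
        return -1;
}

int main(void)
{
        struct ring r;
        int err = ring_alloc(&r);

        if (!err)
                ring_free(&r);
        return err ? 1 : 0;
}
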
299 const struct gve_rx_ring *rx = &priv->rx[queue_idx]; in gve_rx_write_doorbell_dqo() local
300 u64 index = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell_dqo()
302 iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); in gve_rx_write_doorbell_dqo()
308 struct gve_rx_ring *rx; in gve_rx_alloc_rings_dqo() local
312 rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring), in gve_rx_alloc_rings_dqo()
314 if (!rx) in gve_rx_alloc_rings_dqo()
318 err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i); in gve_rx_alloc_rings_dqo()
321 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings_dqo()
327 cfg->rx = rx; in gve_rx_alloc_rings_dqo()
332 gve_rx_free_ring_dqo(priv, &rx[i], cfg); in gve_rx_alloc_rings_dqo()
333 kvfree(rx); in gve_rx_alloc_rings_dqo()
340 struct gve_rx_ring *rx = cfg->rx; in gve_rx_free_rings_dqo() local
343 if (!rx) in gve_rx_free_rings_dqo()
347 gve_rx_free_ring_dqo(priv, &rx[i], cfg); in gve_rx_free_rings_dqo()
349 kvfree(rx); in gve_rx_free_rings_dqo()
350 cfg->rx = NULL; in gve_rx_free_rings_dqo()
353 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) in gve_rx_post_buffers_dqo() argument
355 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_post_buffers_dqo()
356 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_post_buffers_dqo()
357 struct gve_priv *priv = rx->gve; in gve_rx_post_buffers_dqo()
369 if (unlikely(gve_alloc_buffer(rx, desc))) { in gve_rx_post_buffers_dqo()
370 u64_stats_update_begin(&rx->statss); in gve_rx_post_buffers_dqo()
371 rx->rx_buf_alloc_fail++; in gve_rx_post_buffers_dqo()
372 u64_stats_update_end(&rx->statss); in gve_rx_post_buffers_dqo()
376 if (rx->dqo.hdr_bufs.data) in gve_rx_post_buffers_dqo()
378 cpu_to_le64(rx->dqo.hdr_bufs.addr + in gve_rx_post_buffers_dqo()
386 gve_rx_write_doorbell_dqo(priv, rx->q_num); in gve_rx_post_buffers_dqo()
389 rx->fill_cnt += num_posted; in gve_rx_post_buffers_dqo()
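gve_rx_post_buffers_dqo() refills the buffer queue: take the slot at the tail, allocate a buffer for it, optionally point its descriptor at the matching header-buffer slice, advance the tail under the mask, then publish the new tail through the doorbell (gve_rx_write_doorbell_dqo(), source line 302). Below is a toy producer loop in that shape; alloc_buffer(), the full-queue test, and the doorbell variable are stand-ins for the driver's helpers and its MMIO write:

#include <stdint.h>
#include <stdio.h>

#define SLOTS 8

static int alloc_buffer(unsigned slot)
{
        (void)slot;
        return 0;                       /* pretend allocation always succeeds */
}

int main(void)
{
        uint16_t mask = SLOTS - 1, head = 0, tail = 0;
        unsigned num_posted = 0, fill_cnt = 0, doorbell;

        /* Keep one slot free so head == tail still means "empty" in this
         * model; the driver instead tracks completion-queue free slots. */
        while (((tail + 1) & mask) != head) {
                if (alloc_buffer(tail)) {
                        /* The driver bumps rx_buf_alloc_fail and stops early. */
                        break;
                }
                tail = (tail + 1) & mask;
                num_posted++;
        }

        doorbell = tail;                /* gve_rx_write_doorbell_dqo() analogue */
        fill_cnt += num_posted;
        printf("posted %u buffers, doorbell=%u, fill_cnt=%u\n",
               num_posted, doorbell, fill_cnt);
        return 0;
}
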
440 static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) in gve_rx_free_skb() argument
442 if (!rx->ctx.skb_head) in gve_rx_free_skb()
445 if (rx->ctx.skb_head == napi->skb) in gve_rx_free_skb()
447 dev_kfree_skb_any(rx->ctx.skb_head); in gve_rx_free_skb()
448 rx->ctx.skb_head = NULL; in gve_rx_free_skb()
449 rx->ctx.skb_tail = NULL; in gve_rx_free_skb()
452 static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx) in gve_rx_should_trigger_copy_ondemand() argument
454 if (!rx->dqo.qpl) in gve_rx_should_trigger_copy_ondemand()
456 if (rx->dqo.used_buf_states_cnt < in gve_rx_should_trigger_copy_ondemand()
457 (rx->dqo.num_buf_states - in gve_rx_should_trigger_copy_ondemand()
463 static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, in gve_rx_copy_ondemand() argument
477 num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_copy_ondemand()
478 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page, in gve_rx_copy_ondemand()
481 u64_stats_update_begin(&rx->statss); in gve_rx_copy_ondemand()
482 rx->rx_frag_alloc_cnt++; in gve_rx_copy_ondemand()
483 u64_stats_update_end(&rx->statss); in gve_rx_copy_ondemand()
485 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_copy_ondemand()
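gve_rx_should_trigger_copy_ondemand() and gve_rx_copy_ondemand() above act as a safety valve for QPL mode: buffer states are a finite pool, so once too many of them are pinned under in-flight skbs the driver copies the payload into a freshly allocated page and returns the buffer state to the recycled list immediately. A sketch of the trigger check; the reserve threshold here is a stand-in, not the driver's exact formula:

#include <stdbool.h>
#include <stdio.h>

static bool should_copy_ondemand(bool qpl_mode, int used_buf_states,
                                 int num_buf_states, int reserve)
{
        if (!qpl_mode)
                return false;           /* raw addressing never needs this */
        return used_buf_states >= num_buf_states - reserve;
}

int main(void)
{
        printf("%d\n", should_copy_ondemand(true, 120, 128, 16)); /* 1: copy     */
        printf("%d\n", should_copy_ondemand(true,  40, 128, 16)); /* 0: zero-copy */
        return 0;
}
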
489 static void gve_skb_add_rx_frag(struct gve_rx_ring *rx, in gve_skb_add_rx_frag() argument
493 if (rx->dqo.page_pool) { in gve_skb_add_rx_frag()
494 skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags, in gve_skb_add_rx_frag()
500 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, in gve_skb_add_rx_frag()
508 /* Chains multi skbs for single rx packet.
513 u16 buf_len, struct gve_rx_ring *rx, in gve_rx_append_frags() argument
516 int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_append_frags()
525 if (rx->dqo.page_pool) in gve_rx_append_frags()
528 if (rx->ctx.skb_tail == rx->ctx.skb_head) in gve_rx_append_frags()
529 skb_shinfo(rx->ctx.skb_head)->frag_list = skb; in gve_rx_append_frags()
531 rx->ctx.skb_tail->next = skb; in gve_rx_append_frags()
532 rx->ctx.skb_tail = skb; in gve_rx_append_frags()
535 if (rx->ctx.skb_tail != rx->ctx.skb_head) { in gve_rx_append_frags()
536 rx->ctx.skb_head->len += buf_len; in gve_rx_append_frags()
537 rx->ctx.skb_head->data_len += buf_len; in gve_rx_append_frags()
538 rx->ctx.skb_head->truesize += buf_state->page_info.buf_size; in gve_rx_append_frags()
542 if (gve_rx_should_trigger_copy_ondemand(rx)) in gve_rx_append_frags()
543 return gve_rx_copy_ondemand(rx, buf_state, buf_len); in gve_rx_append_frags()
545 gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len); in gve_rx_append_frags()
546 gve_reuse_buffer(rx, buf_state); in gve_rx_append_frags()
550 static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_xdp_done_dqo() argument
555 u64_stats_update_begin(&rx->statss); in gve_xdp_done_dqo()
560 rx->xdp_actions[xdp_act]++; in gve_xdp_done_dqo()
563 rx->xdp_tx_errors++; in gve_xdp_done_dqo()
566 rx->xdp_redirect_errors++; in gve_xdp_done_dqo()
569 u64_stats_update_end(&rx->statss); in gve_xdp_done_dqo()
570 gve_free_buffer(rx, buf_state); in gve_xdp_done_dqo()
577 static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, in gve_rx_dqo() argument
586 struct gve_priv *priv = rx->gve; in gve_rx_dqo()
591 if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { in gve_rx_dqo()
592 net_err_ratelimited("%s: Invalid RX buffer_id=%u\n", in gve_rx_dqo()
596 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
597 if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { in gve_rx_dqo()
598 net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n", in gve_rx_dqo()
604 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
614 if (rx->dqo.page_pool) { in gve_rx_dqo()
626 rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi, in gve_rx_dqo()
627 rx->dqo.hdr_bufs.data + in gve_rx_dqo()
630 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
632 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
634 if (rx->dqo.page_pool) in gve_rx_dqo()
635 skb_mark_for_recycle(rx->ctx.skb_head); in gve_rx_dqo()
639 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
640 rx->rx_hsplit_pkt++; in gve_rx_dqo()
641 rx->rx_hsplit_unsplit_pkt += unsplit; in gve_rx_dqo()
642 rx->rx_hsplit_bytes += hdr_len; in gve_rx_dqo()
643 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
653 if (rx->ctx.skb_head) { in gve_rx_dqo()
654 if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, in gve_rx_dqo()
668 &rx->xdp_rxq); in gve_rx_dqo()
679 gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act, in gve_rx_dqo()
684 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
685 rx->xdp_actions[XDP_PASS]++; in gve_rx_dqo()
686 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
690 rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, in gve_rx_dqo()
692 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
694 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
696 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
697 rx->rx_copied_pkt++; in gve_rx_dqo()
698 rx->rx_copybreak_pkt++; in gve_rx_dqo()
699 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
701 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
705 rx->ctx.skb_head = napi_get_frags(napi); in gve_rx_dqo()
706 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
708 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
710 if (gve_rx_should_trigger_copy_ondemand(rx)) { in gve_rx_dqo()
711 if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0) in gve_rx_dqo()
716 if (rx->dqo.page_pool) in gve_rx_dqo()
717 skb_mark_for_recycle(rx->ctx.skb_head); in gve_rx_dqo()
719 gve_skb_add_rx_frag(rx, buf_state, 0, buf_len); in gve_rx_dqo()
720 gve_reuse_buffer(rx, buf_state); in gve_rx_dqo()
724 gve_free_buffer(rx, buf_state); in gve_rx_dqo()
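The body of gve_rx_dqo() above picks one of several per-completion paths: drop on an invalid buffer id, append to an skb that is already being assembled, copy small packets ("copybreak") and free the buffer at once, or attach the page as a zero-copy frag and keep the buffer for reuse. A compilable outline of that decision order, with stand-in helpers and thresholds rather than the driver's exact checks:

#include <stdbool.h>
#include <stdio.h>

enum rx_path { RX_DROP, RX_APPEND, RX_COPYBREAK, RX_FRAG };

/* Mirrors the ordering visible in gve_rx_dqo(): bad buffer ids are dropped,
 * a packet with an skb already in flight is appended to it, short packets
 * are copied and their buffer freed at once (rx_copied_pkt/rx_copybreak_pkt),
 * and everything else is attached zero-copy so the buffer is reused later. */
static enum rx_path classify(bool buffer_id_valid, bool skb_in_progress,
                             unsigned buf_len, unsigned copybreak)
{
        if (!buffer_id_valid)
                return RX_DROP;
        if (skb_in_progress)
                return RX_APPEND;
        if (buf_len <= copybreak)
                return RX_COPYBREAK;
        return RX_FRAG;
}

int main(void)
{
        printf("small=%d large=%d continuation=%d\n",
               classify(true, false, 128, 256),    /* copybreak copy     */
               classify(true, false, 2048, 256),   /* zero-copy frag     */
               classify(true, true, 2048, 256));   /* append to skb_tail */
        return 0;
}
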
754 static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, in gve_rx_complete_skb() argument
759 rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; in gve_rx_complete_skb()
762 skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); in gve_rx_complete_skb()
765 gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
768 gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
774 err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
779 if (skb_headlen(rx->ctx.skb_head) == 0) in gve_rx_complete_skb()
782 napi_gro_receive(napi, rx->ctx.skb_head); in gve_rx_complete_skb()
792 struct gve_rx_ring *rx = block->rx; in gve_rx_poll_dqo() local
793 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_poll_dqo()
815 err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num); in gve_rx_poll_dqo()
817 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
818 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
820 rx->rx_skb_alloc_fail++; in gve_rx_poll_dqo()
822 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
823 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
836 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_poll_dqo()
842 rx->cnt++; in gve_rx_poll_dqo()
844 if (!rx->ctx.skb_head) in gve_rx_poll_dqo()
851 pkt_bytes = rx->ctx.skb_head->len; in gve_rx_poll_dqo()
855 if (skb_headlen(rx->ctx.skb_head)) in gve_rx_poll_dqo()
859 if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { in gve_rx_poll_dqo()
860 gve_rx_free_skb(napi, rx); in gve_rx_poll_dqo()
861 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
862 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
863 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
868 rx->ctx.skb_head = NULL; in gve_rx_poll_dqo()
869 rx->ctx.skb_tail = NULL; in gve_rx_poll_dqo()
872 gve_rx_post_buffers_dqo(rx); in gve_rx_poll_dqo()
874 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
875 rx->rpackets += work_done; in gve_rx_poll_dqo()
876 rx->rbytes += bytes; in gve_rx_poll_dqo()
877 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
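gve_rx_poll_dqo() consumes the completion queue under a NAPI budget, using complq.head plus complq.cur_gen_bit to tell fresh descriptors from stale ones, then reposts buffers and folds packet/byte counts into the stats. The self-contained model below covers only the generation-bit walk; the field names and exact bit convention are simplified assumptions, not a copy of the driver:

#include <stdint.h>
#include <stdio.h>

#define COMPLQ_SLOTS 4                  /* power of two, like the driver */

struct compl_desc {
        uint8_t gen;                    /* generation bit written by producer */
        uint8_t data;
};

struct complq {
        struct compl_desc ring[COMPLQ_SLOTS];
        uint16_t head;
        uint8_t  cur_gen_bit;           /* generation the consumer expects */
};

/* Consume up to "budget" descriptors.  The ring is never cleared; instead the
 * producer flips the generation bit on every wrap, so in this model a slot is
 * fresh while its bit differs from cur_gen_bit, and the consumer flips
 * cur_gen_bit each time head wraps back to zero. */
static int poll(struct complq *q, int budget)
{
        int work_done = 0;

        while (work_done < budget && q->ring[q->head].gen != q->cur_gen_bit) {
                printf("slot %u: data=%u\n",
                       (unsigned)q->head, (unsigned)q->ring[q->head].data);
                work_done++;

                q->head = (q->head + 1) & (COMPLQ_SLOTS - 1);
                if (q->head == 0)
                        q->cur_gen_bit ^= 1;
        }
        return work_done;
}

int main(void)
{
        struct complq q = { .head = 0, .cur_gen_bit = 0 };

        /* Producer wrote two completions in the current generation. */
        q.ring[0] = (struct compl_desc){ .gen = 1, .data = 10 };
        q.ring[1] = (struct compl_desc){ .gen = 1, .data = 11 };

        printf("work_done=%d\n", poll(&q, 8));    /* consumes 2, stops at slot 2 */
        return 0;
}
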