1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
4 * Copyright (C) 2015-2021 Google, Inc.
19 dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & in gve_rx_free_buffer()
22 page_ref_sub(page_info->page, page_info->pagecnt_bias - 1); in gve_rx_free_buffer()
23 gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); in gve_rx_free_buffer()
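Note: the masking expression above is cut off, but its purpose is visible elsewhere in this listing: the data-slot address can carry the half-page buffer offset in its low bits (see gve_rx_flip_buff() and the raw-addressing branch in gve_rx() further down), so the free path clears the sub-page bits to recover the page-aligned DMA address before freeing. A minimal standalone sketch of that pattern; the mask value here is illustrative, not the driver's constant:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: the DMA address is page aligned, so the low bits are
 * free to carry the in-page buffer offset and must be cleared before the
 * value is used as a page address again.
 */
#define SLOT_ADDR_MASK	(~0xfffULL)	/* illustrative, not the driver's constant */

int main(void)
{
	uint64_t page_dma = 0x12345000ULL;		/* page-aligned DMA address */
	uint64_t slot = page_dma + 2048;		/* slot points at the second half */
	uint64_t dma = slot & SLOT_ADDR_MASK;		/* recover the page address */

	printf("slot=0x%llx -> page dma=0x%llx (offset %llu)\n",
	       (unsigned long long)slot,
	       (unsigned long long)dma,
	       (unsigned long long)(slot & ~SLOT_ADDR_MASK));
	return 0;
}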
28 u32 slots = rx->mask + 1; in gve_rx_unfill_pages() local
31 if (rx->data.raw_addressing) { in gve_rx_unfill_pages()
32 for (i = 0; i < slots; i++) in gve_rx_unfill_pages()
33 gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], in gve_rx_unfill_pages()
34 &rx->data.data_ring[i]); in gve_rx_unfill_pages()
36 for (i = 0; i < slots; i++) in gve_rx_unfill_pages()
37 page_ref_sub(rx->data.page_info[i].page, in gve_rx_unfill_pages()
38 rx->data.page_info[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
39 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_rx_unfill_pages()
40 rx->data.qpl = NULL; in gve_rx_unfill_pages()
42 for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { in gve_rx_unfill_pages()
43 page_ref_sub(rx->qpl_copy_pool[i].page, in gve_rx_unfill_pages()
44 rx->qpl_copy_pool[i].pagecnt_bias - 1); in gve_rx_unfill_pages()
45 put_page(rx->qpl_copy_pool[i].page); in gve_rx_unfill_pages()
48 kvfree(rx->data.page_info); in gve_rx_unfill_pages()
49 rx->data.page_info = NULL; in gve_rx_unfill_pages()
54 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring()
55 struct device *dev = &priv->pdev->dev; in gve_rx_free_ring()
56 u32 slots = rx->mask + 1; in gve_rx_free_ring() local
61 bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; in gve_rx_free_ring()
62 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring()
63 rx->desc.desc_ring = NULL; in gve_rx_free_ring()
65 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring()
66 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring()
67 rx->q_resources = NULL; in gve_rx_free_ring()
71 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring()
72 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring()
73 rx->data.data_bus); in gve_rx_free_ring()
74 rx->data.data_ring = NULL; in gve_rx_free_ring()
76 kvfree(rx->qpl_copy_pool); in gve_rx_free_ring()
77 rx->qpl_copy_pool = NULL; in gve_rx_free_ring()
79 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring()
85 page_info->page = page; in gve_setup_rx_buffer()
86 page_info->page_offset = 0; in gve_setup_rx_buffer()
87 page_info->page_address = page_address(page); in gve_setup_rx_buffer()
90 page_ref_add(page, INT_MAX - 1); in gve_setup_rx_buffer()
91 page_info->pagecnt_bias = INT_MAX; in gve_setup_rx_buffer()
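Note: gve_setup_rx_buffer() front-loads the page refcount: it takes INT_MAX - 1 extra references once and records the total in pagecnt_bias, so handing a fragment to the stack later only costs a decrement of the driver-private bias rather than an atomic refcount operation. Teardown (gve_rx_free_buffer()/gve_rx_unfill_pages() above) returns the unspent references with page_ref_sub(page, pagecnt_bias - 1) and drops the last one when the page is freed. A minimal userspace sketch of the same accounting, with the page refcount modeled as a plain counter:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Model of a page's reference count plus the driver-side bias. */
struct fake_page { long refcount; };

static void setup_buffer(struct fake_page *p, long *bias)
{
	p->refcount += INT_MAX - 1;	/* page_ref_add(page, INT_MAX - 1) */
	*bias = INT_MAX;		/* page_info->pagecnt_bias = INT_MAX */
}

/* Handing a fragment to the stack costs one reference; the driver pays for
 * it by decrementing its pre-paid bias instead of the refcount.
 */
static void give_frag_to_stack(long *bias)
{
	(*bias)--;
}

static void release_buffer(struct fake_page *p, long bias)
{
	p->refcount -= bias - 1;	/* page_ref_sub(page, pagecnt_bias - 1) */
	p->refcount -= 1;		/* final put when the page is freed */
}

int main(void)
{
	struct fake_page page = { .refcount = 1 };	/* freshly allocated page */
	long bias;

	setup_buffer(&page, &bias);
	give_frag_to_stack(&bias);	/* one reference now belongs to the stack */
	release_buffer(&page, bias);

	/* Exactly the reference handed to the stack is still outstanding. */
	assert(page.refcount == 1);
	printf("outstanding references: %ld\n", page.refcount);
	return 0;
}

The equality the assert checks is the same invariant the recycle test later in the file relies on: while the driver owns every reference, page_count() equals pagecnt_bias.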
107 gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); in gve_rx_alloc_buffer()
113 struct gve_priv *priv = rx->gve; in gve_prefill_rx_pages()
114 u32 slots; in gve_prefill_rx_pages() local
122 slots = rx->mask + 1; in gve_prefill_rx_pages()
124 rx->data.page_info = kvzalloc(slots * in gve_prefill_rx_pages()
125 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_prefill_rx_pages()
126 if (!rx->data.page_info) in gve_prefill_rx_pages()
127 return -ENOMEM; in gve_prefill_rx_pages()
129 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
130 rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num); in gve_prefill_rx_pages()
131 if (!rx->data.qpl) { in gve_prefill_rx_pages()
132 kvfree(rx->data.page_info); in gve_prefill_rx_pages()
133 rx->data.page_info = NULL; in gve_prefill_rx_pages()
134 return -ENOMEM; in gve_prefill_rx_pages()
137 for (i = 0; i < slots; i++) { in gve_prefill_rx_pages()
138 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
139 struct page *page = rx->data.qpl->pages[i]; in gve_prefill_rx_pages()
142 gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, in gve_prefill_rx_pages()
143 &rx->data.data_ring[i].qpl_offset); in gve_prefill_rx_pages()
146 err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], in gve_prefill_rx_pages()
147 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
152 if (!rx->data.raw_addressing) { in gve_prefill_rx_pages()
153 for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) { in gve_prefill_rx_pages()
157 err = -ENOMEM; in gve_prefill_rx_pages()
161 rx->qpl_copy_pool[j].page = page; in gve_prefill_rx_pages()
162 rx->qpl_copy_pool[j].page_offset = 0; in gve_prefill_rx_pages()
163 rx->qpl_copy_pool[j].page_address = page_address(page); in gve_prefill_rx_pages()
166 page_ref_add(page, INT_MAX - 1); in gve_prefill_rx_pages()
167 rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX; in gve_prefill_rx_pages()
171 return slots; in gve_prefill_rx_pages()
175 while (j--) { in gve_prefill_rx_pages()
176 page_ref_sub(rx->qpl_copy_pool[j].page, in gve_prefill_rx_pages()
177 rx->qpl_copy_pool[j].pagecnt_bias - 1); in gve_prefill_rx_pages()
178 put_page(rx->qpl_copy_pool[j].page); in gve_prefill_rx_pages()
181 /* Do not fully free QPL pages - only remove the bias added in this in gve_prefill_rx_pages()
184 while (i--) in gve_prefill_rx_pages()
185 page_ref_sub(rx->data.page_info[i].page, in gve_prefill_rx_pages()
186 rx->data.page_info[i].pagecnt_bias - 1); in gve_prefill_rx_pages()
188 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_prefill_rx_pages()
189 rx->data.qpl = NULL; in gve_prefill_rx_pages()
194 while (i--) in gve_prefill_rx_pages()
195 gve_rx_free_buffer(&priv->pdev->dev, in gve_prefill_rx_pages()
196 &rx->data.page_info[i], in gve_prefill_rx_pages()
197 &rx->data.data_ring[i]); in gve_prefill_rx_pages()
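Note: the error path of gve_prefill_rx_pages() unwinds in reverse order: while (j--) and while (i--) walk back over only the entries that were successfully initialised, and QPL-backed pages are not freed outright here, only the bias added by this function is removed. A small self-contained sketch of that unwind idiom (the slot type and helpers are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 8

/* Illustrative stand-ins for per-slot setup and teardown. */
static int setup_slot(int *slot, int i)
{
	if (i == 5)			/* simulate a failure part-way through */
		return -1;
	*slot = i + 1;
	return 0;
}

static void teardown_slot(int *slot)
{
	*slot = 0;
}

int main(void)
{
	int slots[NSLOTS] = { 0 };
	int i;

	for (i = 0; i < NSLOTS; i++) {
		if (setup_slot(&slots[i], i))
			goto unwind;
	}
	printf("all %d slots ready\n", NSLOTS);
	return 0;

unwind:
	/* Walk back over the slots that were actually initialised; slot i
	 * itself failed and was never set up, so it is skipped.
	 */
	while (i--)
		teardown_slot(&slots[i]);
	fprintf(stderr, "setup failed, unwound cleanly\n");
	return EXIT_FAILURE;
}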
203 ctx->skb_head = NULL; in gve_rx_ctx_clear()
204 ctx->skb_tail = NULL; in gve_rx_ctx_clear()
205 ctx->total_size = 0; in gve_rx_ctx_clear()
206 ctx->frag_cnt = 0; in gve_rx_ctx_clear()
207 ctx->drop_pkt = false; in gve_rx_ctx_clear()
212 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring()
213 struct device *hdev = &priv->pdev->dev; in gve_rx_alloc_ring()
216 u32 slots; in gve_rx_alloc_ring() local
219 netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); in gve_rx_alloc_ring()
223 rx->gve = priv; in gve_rx_alloc_ring()
224 rx->q_num = idx; in gve_rx_alloc_ring()
226 slots = priv->rx_data_slot_cnt; in gve_rx_alloc_ring()
227 rx->mask = slots - 1; in gve_rx_alloc_ring()
228 rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; in gve_rx_alloc_ring()
231 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
232 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring()
233 &rx->data.data_bus, in gve_rx_alloc_ring()
235 if (!rx->data.data_ring) in gve_rx_alloc_ring()
236 return -ENOMEM; in gve_rx_alloc_ring()
238 rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1; in gve_rx_alloc_ring()
239 rx->qpl_copy_pool_head = 0; in gve_rx_alloc_ring()
240 rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1, in gve_rx_alloc_ring()
241 sizeof(rx->qpl_copy_pool[0]), in gve_rx_alloc_ring()
244 if (!rx->qpl_copy_pool) { in gve_rx_alloc_ring()
245 err = -ENOMEM; in gve_rx_alloc_ring()
251 err = -ENOMEM; in gve_rx_alloc_ring()
254 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring()
255 /* Ensure data ring slots (packet buffers) are visible. */ in gve_rx_alloc_ring()
259 rx->q_resources = in gve_rx_alloc_ring()
261 sizeof(*rx->q_resources), in gve_rx_alloc_ring()
262 &rx->q_resources_bus, in gve_rx_alloc_ring()
264 if (!rx->q_resources) { in gve_rx_alloc_ring()
265 err = -ENOMEM; in gve_rx_alloc_ring()
268 netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx, in gve_rx_alloc_ring()
269 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring()
272 bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; in gve_rx_alloc_ring()
273 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring()
275 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring()
276 err = -ENOMEM; in gve_rx_alloc_ring()
279 rx->cnt = 0; in gve_rx_alloc_ring()
280 rx->db_threshold = priv->rx_desc_cnt / 2; in gve_rx_alloc_ring()
281 rx->desc.seqno = 1; in gve_rx_alloc_ring()
283 /* Allocating half-page buffers allows page-flipping which is faster in gve_rx_alloc_ring()
286 rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; in gve_rx_alloc_ring()
287 gve_rx_ctx_clear(&rx->ctx); in gve_rx_alloc_ring()
293 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring()
294 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring()
295 rx->q_resources = NULL; in gve_rx_alloc_ring()
299 kvfree(rx->qpl_copy_pool); in gve_rx_alloc_ring()
300 rx->qpl_copy_pool = NULL; in gve_rx_alloc_ring()
302 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
303 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring()
304 rx->data.data_ring = NULL; in gve_rx_alloc_ring()
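Note: gve_rx_alloc_ring() sizes the data ring and the copy pool around what must be a power-of-two slot count and stores size - 1 as a mask (rx->mask, rx->qpl_copy_pool_mask), so free-running 32-bit counters can be reduced to ring indices with a single AND instead of a modulo, and the counters may wrap freely. A short sketch of that indexing scheme:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t slots = 1024;		/* must be a power of two */
	const uint32_t mask = slots - 1;	/* e.g. rx->mask = slots - 1 */
	uint32_t cnt = UINT32_MAX - 2;		/* free-running counter near the wrap */
	int i;

	for (i = 0; i < 6; i++, cnt++) {
		uint32_t idx = cnt & mask;	/* same result as cnt % slots, one AND */

		assert(idx == cnt % slots);
		printf("cnt=%u -> idx=%u\n", (unsigned)cnt, (unsigned)idx);
	}
	return 0;
}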
314 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_rx_alloc_rings()
317 netif_err(priv, drv, priv->dev, in gve_rx_alloc_rings()
337 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_rx_free_rings_gqi()
343 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
345 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
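Note: the doorbell write tells the device how far the ring has been filled: rx->fill_cnt is byte-swapped to big-endian and stored into the doorbell BAR at the index the device returned in q_resources. A tiny sketch of the endianness half of this; iowrite32be() amounts to a big-endian conversion followed by a 32-bit MMIO store, modeled here with htonl() and a local variable standing in for the BAR slot:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(): host to big-endian 32-bit */

int main(void)
{
	uint32_t fill_cnt = 1024;		/* value the device should see */
	volatile uint32_t fake_doorbell;	/* stand-in for &priv->db_bar2[db_idx] */

	/* iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]) boils down to: */
	fake_doorbell = htonl(fill_cnt);

	printf("cpu value: 0x%08x, as stored in the BAR: 0x%08x\n",
	       fill_cnt, fake_doorbell);
	return 0;
}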
362 u32 offset = page_info->page_offset + page_info->pad; in gve_rx_add_frags()
363 struct sk_buff *skb = ctx->skb_tail; in gve_rx_add_frags()
371 ctx->skb_head = skb; in gve_rx_add_frags()
372 ctx->skb_tail = skb; in gve_rx_add_frags()
374 num_frags = skb_shinfo(ctx->skb_tail)->nr_frags; in gve_rx_add_frags()
381 // which is why we do not need to chain by using skb->next in gve_rx_add_frags()
382 skb_shinfo(ctx->skb_tail)->frag_list = skb; in gve_rx_add_frags()
384 ctx->skb_tail = skb; in gve_rx_add_frags()
389 if (skb != ctx->skb_head) { in gve_rx_add_frags()
390 ctx->skb_head->len += len; in gve_rx_add_frags()
391 ctx->skb_head->data_len += len; in gve_rx_add_frags()
392 ctx->skb_head->truesize += truesize; in gve_rx_add_frags()
394 skb_add_rx_frag(skb, num_frags, page_info->page, in gve_rx_add_frags()
397 return ctx->skb_head; in gve_rx_add_frags()
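Note: when a packet spans several buffers, gve_rx_add_frags() keeps the running totals on the head skb: len, data_len and truesize on ctx->skb_head always describe the whole packet even when later fragments land on chained skbs via frag_list. A small sketch of that bookkeeping with a plain struct standing in for the head skb:

#include <stdio.h>

/* Minimal model of the fields gve_rx_add_frags() updates on the head skb. */
struct fake_skb {
	unsigned int len;	/* total bytes in the packet */
	unsigned int data_len;	/* bytes living in fragments, not the linear area */
	unsigned int truesize;	/* memory accounted against the socket */
};

static void add_frag(struct fake_skb *head, unsigned int len, unsigned int truesize)
{
	head->len += len;
	head->data_len += len;
	head->truesize += truesize;
}

int main(void)
{
	struct fake_skb head = { 0, 0, 0 };

	/* Three half-page buffers worth of payload, e.g. a ~5 KiB packet. */
	add_frag(&head, 2048, 2048);
	add_frag(&head, 2048, 2048);
	add_frag(&head, 1200, 2048);	/* last fragment only partially filled */

	printf("len=%u data_len=%u truesize=%u\n",
	       head.len, head.data_len, head.truesize);
	return 0;
}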
405 page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET; in gve_rx_flip_buff()
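Note: gve_rx_flip_buff() is the half-page flip mentioned in the comment in gve_rx_alloc_ring(): each page is split into two buffers, and switching between them is a single XOR of the offset with GVE_DEFAULT_RX_BUFFER_OFFSET. The 2048-byte value used below is assumed from the default 2 KiB buffer size, not taken from this excerpt:

#include <assert.h>
#include <stdio.h>

#define BUFFER_OFFSET 2048u	/* assumed value of GVE_DEFAULT_RX_BUFFER_OFFSET */

int main(void)
{
	unsigned int off = 0;

	off ^= BUFFER_OFFSET;	/* now using the second half of the page */
	assert(off == 2048);
	off ^= BUFFER_OFFSET;	/* back to the first half */
	assert(off == 0);
	printf("flip toggles cleanly: %u\n", off);
	return 0;
}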
411 int pagecount = page_count(page_info->page); in gve_rx_can_recycle_buffer()
413 /* This page is not being used by any SKBs - reuse */ in gve_rx_can_recycle_buffer()
414 if (pagecount == page_info->pagecnt_bias) in gve_rx_can_recycle_buffer()
416 /* This page is still being used by an SKB - we can't reuse */ in gve_rx_can_recycle_buffer()
417 else if (pagecount > page_info->pagecnt_bias) in gve_rx_can_recycle_buffer()
419 WARN(pagecount < page_info->pagecnt_bias, in gve_rx_can_recycle_buffer()
421 return -1; in gve_rx_can_recycle_buffer()
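Note: the recycle decision is a pure refcount comparison against the pre-paid bias: equal means every reference is still the driver's and the buffer can be flipped and reused, greater means the stack still holds fragments from the page, and less indicates a refcount underflow that triggers the WARN. A compact sketch of the three-way check:

#include <stdio.h>

/* Mirrors the decision in gve_rx_can_recycle_buffer():
 *  1 -> recycle, 0 -> still in use, -1 -> refcount underflow (bug)
 */
static int can_recycle(long page_count, long pagecnt_bias)
{
	if (page_count == pagecnt_bias)
		return 1;
	if (page_count > pagecnt_bias)
		return 0;
	return -1;
}

int main(void)
{
	printf("%d %d %d\n",
	       can_recycle(100, 100),	/* all refs are ours: recycle */
	       can_recycle(101, 100),	/* stack holds a frag: do not */
	       can_recycle(99, 100));	/* underflow: would WARN */
	return 0;
}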
449 u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask; in gve_rx_copy_to_pool()
450 void *src = page_info->page_address + page_info->page_offset; in gve_rx_copy_to_pool()
452 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_copy_to_pool()
457 copy_page_info = &rx->qpl_copy_pool[pool_idx]; in gve_rx_copy_to_pool()
458 if (!copy_page_info->can_flip) { in gve_rx_copy_to_pool()
462 gve_schedule_reset(rx->gve); in gve_rx_copy_to_pool()
474 * on alleviates head-of-line blocking. in gve_rx_copy_to_pool()
476 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
485 alloc_page_info.pad = page_info->pad; in gve_rx_copy_to_pool()
487 memcpy(alloc_page_info.page_address, src, page_info->pad + len); in gve_rx_copy_to_pool()
492 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
493 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
494 rx->rx_frag_alloc_cnt++; in gve_rx_copy_to_pool()
495 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
500 dst = copy_page_info->page_address + copy_page_info->page_offset; in gve_rx_copy_to_pool()
501 memcpy(dst, src, page_info->pad + len); in gve_rx_copy_to_pool()
502 copy_page_info->pad = page_info->pad; in gve_rx_copy_to_pool()
505 rx->packet_buffer_size, len, ctx); in gve_rx_copy_to_pool()
510 copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET; in gve_rx_copy_to_pool()
512 if (copy_page_info->can_flip) { in gve_rx_copy_to_pool()
516 copy_page_info->can_flip = false; in gve_rx_copy_to_pool()
517 rx->qpl_copy_pool_head++; in gve_rx_copy_to_pool()
518 prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page); in gve_rx_copy_to_pool()
520 copy_page_info->can_flip = true; in gve_rx_copy_to_pool()
523 u64_stats_update_begin(&rx->statss); in gve_rx_copy_to_pool()
524 rx->rx_frag_copy_cnt++; in gve_rx_copy_to_pool()
525 u64_stats_update_end(&rx->statss); in gve_rx_copy_to_pool()
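Note: the QPL copy pool is itself a small power-of-two ring: the slot comes from qpl_copy_pool_head & qpl_copy_pool_mask, and each pool page is used twice, front half then back half, via the same offset XOR, with can_flip tracking whether the second half is still free before the head advances. A short sketch of that slot and half selection (the 2048-byte half size is assumed, as above):

#include <stdio.h>

#define POOL_SLOTS	4	/* power of two, i.e. qpl_copy_pool_mask + 1 */
#define HALF_OFFSET	2048u	/* assumed GVE_DEFAULT_RX_BUFFER_OFFSET */

struct pool_slot {
	unsigned int offset;	/* 0 or HALF_OFFSET */
	int can_flip;		/* is the second half still free? */
};

int main(void)
{
	struct pool_slot pool[POOL_SLOTS] = { { 0, 0 } };
	unsigned int head = 0, mask = POOL_SLOTS - 1;
	int i;

	for (i = 0; i < 6; i++) {
		struct pool_slot *s = &pool[head & mask];

		printf("copy %d -> slot %u, offset %u\n", i, head & mask, s->offset);
		s->offset ^= HALF_OFFSET;	/* next copy uses the other half */
		if (s->can_flip) {
			s->can_flip = 0;	/* both halves used: advance */
			head++;
		} else {
			s->can_flip = 1;	/* one half still free: stay */
		}
	}
	return 0;
}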
536 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_qpl()
544 if (page_info->can_flip) { in gve_rx_qpl()
545 skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); in gve_rx_qpl()
550 gve_rx_flip_buff(page_info, &data_slot->qpl_offset); in gve_rx_qpl()
563 struct net_device *netdev = priv->dev; in gve_rx_skb()
564 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx_skb()
567 if (len <= priv->rx_copybreak && is_only_frag) { in gve_rx_skb()
571 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
572 rx->rx_copied_pkt++; in gve_rx_skb()
573 rx->rx_frag_copy_cnt++; in gve_rx_skb()
574 rx->rx_copybreak_pkt++; in gve_rx_skb()
575 u64_stats_update_end(&rx->statss); in gve_rx_skb()
584 page_info->can_flip = recycle; in gve_rx_skb()
585 if (page_info->can_flip) { in gve_rx_skb()
586 u64_stats_update_begin(&rx->statss); in gve_rx_skb()
587 rx->rx_frag_flip_cnt++; in gve_rx_skb()
588 u64_stats_update_end(&rx->statss); in gve_rx_skb()
591 if (rx->data.raw_addressing) { in gve_rx_skb()
592 skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev, in gve_rx_skb()
595 rx->packet_buffer_size, ctx); in gve_rx_skb()
597 skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, in gve_rx_skb()
612 if (rx->xsk_pool->frame_len < len) in gve_xsk_pool_redirect()
613 return -E2BIG; in gve_xsk_pool_redirect()
614 xdp = xsk_buff_alloc(rx->xsk_pool); in gve_xsk_pool_redirect()
616 u64_stats_update_begin(&rx->statss); in gve_xsk_pool_redirect()
617 rx->xdp_alloc_fails++; in gve_xsk_pool_redirect()
618 u64_stats_update_end(&rx->statss); in gve_xsk_pool_redirect()
619 return -ENOMEM; in gve_xsk_pool_redirect()
621 xdp->data_end = xdp->data + len; in gve_xsk_pool_redirect()
622 memcpy(xdp->data, data, len); in gve_xsk_pool_redirect()
632 int total_len, len = orig->data_end - orig->data; in gve_xdp_redirect()
638 if (rx->xsk_pool) in gve_xdp_redirect()
639 return gve_xsk_pool_redirect(dev, rx, orig->data, in gve_xdp_redirect()
644 frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC); in gve_xdp_redirect()
646 u64_stats_update_begin(&rx->statss); in gve_xdp_redirect()
647 rx->xdp_alloc_fails++; in gve_xdp_redirect()
648 u64_stats_update_end(&rx->statss); in gve_xdp_redirect()
649 return -ENOMEM; in gve_xdp_redirect()
651 xdp_init_buff(&new, total_len, &rx->xdp_rxq); in gve_xdp_redirect()
653 memcpy(new.data, orig->data, len); in gve_xdp_redirect()
666 struct gve_tx_ring *tx; in gve_xdp_done() local
676 tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num); in gve_xdp_done()
677 tx = &priv->tx[tx_qid]; in gve_xdp_done()
678 spin_lock(&tx->xdp_lock); in gve_xdp_done()
679 err = gve_xdp_xmit_one(priv, tx, xdp->data, in gve_xdp_done()
680 xdp->data_end - xdp->data, NULL); in gve_xdp_done()
681 spin_unlock(&tx->xdp_lock); in gve_xdp_done()
684 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
685 rx->xdp_tx_errors++; in gve_xdp_done()
686 u64_stats_update_end(&rx->statss); in gve_xdp_done()
690 err = gve_xdp_redirect(priv->dev, rx, xdp, xprog); in gve_xdp_done()
693 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
694 rx->xdp_redirect_errors++; in gve_xdp_done()
695 u64_stats_update_end(&rx->statss); in gve_xdp_done()
699 u64_stats_update_begin(&rx->statss); in gve_xdp_done()
701 rx->xdp_actions[xdp_act]++; in gve_xdp_done()
702 u64_stats_update_end(&rx->statss); in gve_xdp_done()
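Note: gve_xdp_done() is a standard XDP verdict dispatch: XDP_PASS falls through to normal skb processing, XDP_TX sends the frame on the paired XDP TX queue under tx->xdp_lock, XDP_REDIRECT goes through gve_xdp_redirect(), the remaining verdicts end in a drop (not shown in this excerpt), and a per-action counter is bumped either way. A minimal, hypothetical dispatch skeleton in the same shape; the verdict names and handlers below are illustrative stand-ins, not the kernel's enum xdp_action:

#include <stdio.h>

/* Illustrative stand-ins for the XDP verdicts handled in gve_xdp_done(). */
enum verdict { VERDICT_PASS, VERDICT_TX, VERDICT_REDIRECT, VERDICT_DROP };

static unsigned long actions[4];	/* per-verdict counters, like rx->xdp_actions[] */

static void handle_frame(enum verdict v)
{
	switch (v) {
	case VERDICT_PASS:
		/* fall through to the normal skb build */
		break;
	case VERDICT_TX:
		/* would lock the paired XDP TX queue and transmit the frame */
		break;
	case VERDICT_REDIRECT:
		/* would hand the frame to the redirect path */
		break;
	default:
		/* unknown verdict or drop: frame is freed */
		break;
	}
	actions[v]++;
}

int main(void)
{
	handle_frame(VERDICT_PASS);
	handle_frame(VERDICT_TX);
	handle_frame(VERDICT_REDIRECT);
	handle_frame(VERDICT_DROP);
	printf("pass=%lu tx=%lu redirect=%lu drop=%lu\n",
	       actions[VERDICT_PASS], actions[VERDICT_TX],
	       actions[VERDICT_REDIRECT], actions[VERDICT_DROP]);
	return 0;
}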
710 bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq); in gve_rx()
712 u16 frag_size = be16_to_cpu(desc->len); in gve_rx()
713 struct gve_rx_ctx *ctx = &rx->ctx; in gve_rx()
715 struct gve_priv *priv = rx->gve; in gve_rx()
723 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
724 bool is_first_frag = ctx->frag_cnt == 0; in gve_rx()
728 if (unlikely(ctx->drop_pkt)) in gve_rx()
731 if (desc->flags_seq & GVE_RXF_ERR) { in gve_rx()
732 ctx->drop_pkt = true; in gve_rx()
733 cnts->desc_err_pkt_cnt++; in gve_rx()
738 if (unlikely(frag_size > rx->packet_buffer_size)) { in gve_rx()
739 netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset", in gve_rx()
740 frag_size, rx->packet_buffer_size); in gve_rx()
741 ctx->drop_pkt = true; in gve_rx()
743 gve_schedule_reset(rx->gve); in gve_rx()
748 page_info = &rx->data.page_info[(idx + 2) & rx->mask]; in gve_rx()
749 va = page_info->page_address + page_info->page_offset; in gve_rx()
750 prefetch(page_info->page); /* Kernel page struct. */ in gve_rx()
754 page_info = &rx->data.page_info[idx]; in gve_rx()
755 data_slot = &rx->data.data_ring[idx]; in gve_rx()
756 page_bus = (rx->data.raw_addressing) ? in gve_rx()
757 be64_to_cpu(data_slot->addr) - page_info->page_offset : in gve_rx()
758 rx->data.qpl->page_buses[idx]; in gve_rx()
759 dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, in gve_rx()
761 page_info->pad = is_first_frag ? GVE_RX_PAD : 0; in gve_rx()
762 len -= page_info->pad; in gve_rx()
763 frag_size -= page_info->pad; in gve_rx()
765 xprog = READ_ONCE(priv->xdp_prog); in gve_rx()
770 xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq); in gve_rx()
771 xdp_prepare_buff(&xdp, page_info->page_address + in gve_rx()
772 page_info->page_offset, GVE_RX_PAD, in gve_rx()
778 ctx->total_size += frag_size; in gve_rx()
782 page_info->pad += xdp.data - old_data; in gve_rx()
783 len = xdp.data_end - xdp.data; in gve_rx()
785 u64_stats_update_begin(&rx->statss); in gve_rx()
786 rx->xdp_actions[XDP_PASS]++; in gve_rx()
787 u64_stats_update_end(&rx->statss); in gve_rx()
793 u64_stats_update_begin(&rx->statss); in gve_rx()
794 rx->rx_skb_alloc_fail++; in gve_rx()
795 u64_stats_update_end(&rx->statss); in gve_rx()
798 ctx->drop_pkt = true; in gve_rx()
801 ctx->total_size += frag_size; in gve_rx()
806 if (desc->csum) in gve_rx()
807 skb->ip_summed = CHECKSUM_COMPLETE; in gve_rx()
809 skb->ip_summed = CHECKSUM_NONE; in gve_rx()
810 skb->csum = csum_unfold(desc->csum); in gve_rx()
815 gve_needs_rss(desc->flags_seq)) in gve_rx()
816 skb_set_hash(skb, be32_to_cpu(desc->rss_hash), in gve_rx()
817 gve_rss_type(desc->flags_seq)); in gve_rx()
821 skb_record_rx_queue(skb, rx->q_num); in gve_rx()
832 cnts->ok_pkt_bytes += ctx->total_size; in gve_rx()
833 cnts->ok_pkt_cnt++; in gve_rx()
835 ctx->frag_cnt++; in gve_rx()
837 cnts->total_pkt_cnt++; in gve_rx()
838 cnts->cont_pkt_cnt += (ctx->frag_cnt > 1); in gve_rx()
849 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
850 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
852 flags_seq = desc->flags_seq; in gve_rx_work_pending()
854 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
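Note: descriptor ownership in this queue format is tracked purely by sequence number: the device stamps a small sequence value into each completed descriptor, the ring remembers the value it expects next (rx->desc.seqno, initialised to 1 in gve_rx_alloc_ring()), and a descriptor is ready exactly when the two match, after which the expectation advances via gve_next_seqno(). The sketch below assumes a 3-bit counter that cycles 1..7 and skips 0; that wrap rule is an assumption about gve_next_seqno(), which is not shown in this excerpt:

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of gve_next_seqno(): a 3-bit counter cycling 1..7. */
static uint8_t next_seqno(uint8_t seq)
{
	return (seq + 1) == 8 ? 1 : seq + 1;
}

int main(void)
{
	uint8_t expected = 1;	/* rx->desc.seqno right after ring setup */
	/* Sequence values the device stamped into the first descriptors; the
	 * last two slots still hold their zeroed, never-written value.
	 */
	uint8_t ring_seq[10] = { 1, 2, 3, 4, 5, 6, 7, 1, 0, 0 };
	int i;

	for (i = 0; i < 10; i++) {
		int ready = (ring_seq[i] == expected);

		printf("desc %d: seq=%u expected=%u -> %s\n", i,
		       (unsigned)ring_seq[i], (unsigned)expected,
		       ready ? "process" : "stop");
		if (!ready)
			break;	/* gve_rx_work_pending() would report no work here */
		expected = next_seqno(expected);
	}
	return 0;
}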
859 int refill_target = rx->mask + 1; in gve_rx_refill_buffers()
860 u32 fill_cnt = rx->fill_cnt; in gve_rx_refill_buffers()
862 while (fill_cnt - rx->cnt < refill_target) { in gve_rx_refill_buffers()
864 u32 idx = fill_cnt & rx->mask; in gve_rx_refill_buffers()
866 page_info = &rx->data.page_info[idx]; in gve_rx_refill_buffers()
867 if (page_info->can_flip) { in gve_rx_refill_buffers()
872 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
874 gve_rx_flip_buff(page_info, &data_slot->addr); in gve_rx_refill_buffers()
875 page_info->can_flip = 0; in gve_rx_refill_buffers()
880 * Flipping is unnecessary here - if the networking stack still in gve_rx_refill_buffers()
887 if (!rx->data.raw_addressing) in gve_rx_refill_buffers()
892 /* We can't reuse the buffer - alloc a new one*/ in gve_rx_refill_buffers()
894 &rx->data.data_ring[idx]; in gve_rx_refill_buffers()
895 struct device *dev = &priv->pdev->dev; in gve_rx_refill_buffers()
897 page_info->page = NULL; in gve_rx_refill_buffers()
900 u64_stats_update_begin(&rx->statss); in gve_rx_refill_buffers()
901 rx->rx_buf_alloc_fail++; in gve_rx_refill_buffers()
902 u64_stats_update_end(&rx->statss); in gve_rx_refill_buffers()
909 rx->fill_cnt = fill_cnt; in gve_rx_refill_buffers()
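Note: the refill loop leans on unsigned wrap-around arithmetic: fill_cnt and cnt are free-running u32 counters, so the number of posted-but-unconsumed buffers is always fill_cnt - cnt, and the comparisons against refill_target and db_threshold stay correct even after either counter wraps past UINT32_MAX. A short demonstration of why the subtraction survives the wrap:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Producer (fill_cnt) has wrapped, consumer (cnt) has not yet. */
	uint32_t fill_cnt = 5;			/* really 2^32 + 5 buffers posted */
	uint32_t cnt = UINT32_MAX - 10;		/* consumer is 16 buffers behind */

	uint32_t outstanding = fill_cnt - cnt;	/* unsigned wrap makes this work */

	assert(outstanding == 16);
	printf("outstanding buffers: %u\n", outstanding);
	return 0;
}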
916 u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT]; in gve_clean_rx_done()
917 u64 xdp_txs = rx->xdp_actions[XDP_TX]; in gve_clean_rx_done()
918 struct gve_rx_ctx *ctx = &rx->ctx; in gve_clean_rx_done()
919 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
922 u32 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
925 struct gve_rx_desc *desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
928 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
929 (work_done < budget || ctx->frag_cnt)) { in gve_clean_rx_done()
930 next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask]; in gve_clean_rx_done()
935 rx->cnt++; in gve_clean_rx_done()
936 idx = rx->cnt & rx->mask; in gve_clean_rx_done()
937 desc = &rx->desc.desc_ring[idx]; in gve_clean_rx_done()
938 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_clean_rx_done()
943 if (unlikely(ctx->frag_cnt)) { in gve_clean_rx_done()
944 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_clean_rx_done()
947 gve_rx_ctx_clear(&rx->ctx); in gve_clean_rx_done()
948 netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset", in gve_clean_rx_done()
949 GVE_SEQNO(desc->flags_seq), rx->desc.seqno); in gve_clean_rx_done()
950 gve_schedule_reset(rx->gve); in gve_clean_rx_done()
953 if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold) in gve_clean_rx_done()
957 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
958 rx->rpackets += cnts.ok_pkt_cnt; in gve_clean_rx_done()
959 rx->rbytes += cnts.ok_pkt_bytes; in gve_clean_rx_done()
960 rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt; in gve_clean_rx_done()
961 rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt; in gve_clean_rx_done()
962 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
965 if (xdp_txs != rx->xdp_actions[XDP_TX]) in gve_clean_rx_done()
966 gve_xdp_tx_flush(priv, rx->q_num); in gve_clean_rx_done()
968 if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT]) in gve_clean_rx_done()
971 /* restock ring slots */ in gve_clean_rx_done()
972 if (!rx->data.raw_addressing) { in gve_clean_rx_done()
974 rx->fill_cnt += work_done; in gve_clean_rx_done()
975 } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
985 if (rx->fill_cnt - rx->cnt <= rx->db_threshold) { in gve_clean_rx_done()
997 struct gve_rx_ring *rx = block->rx; in gve_rx_poll()
1001 feat = block->napi.dev->features; in gve_rx_poll()