Lines Matching full:rx

16 block->rx = NULL;  in gve_rx_remove_from_block()
21 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring() local
29 dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); in gve_rx_free_ring()
30 rx->desc.desc_ring = NULL; in gve_rx_free_ring()
32 dma_free_coherent(dev, sizeof(*rx->q_resources), in gve_rx_free_ring()
33 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring()
34 rx->q_resources = NULL; in gve_rx_free_ring()
36 gve_unassign_qpl(priv, rx->data.qpl->id); in gve_rx_free_ring()
37 rx->data.qpl = NULL; in gve_rx_free_ring()
38 kvfree(rx->data.page_info); in gve_rx_free_ring()
40 slots = rx->mask + 1; in gve_rx_free_ring()
41 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_free_ring()
42 dma_free_coherent(dev, bytes, rx->data.data_ring, in gve_rx_free_ring()
43 rx->data.data_bus); in gve_rx_free_ring()
44 rx->data.data_ring = NULL; in gve_rx_free_ring()
45 netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); in gve_rx_free_ring()
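
The lines above from gve_rx_free_ring() show a consistent teardown discipline: each dma_free_coherent() pairs with a dma_alloc_coherent() from ring setup, and every freed pointer is NULLed so a repeated teardown is harmless. A minimal sketch of that pairing, using made-up "my_" names rather than the driver's own structures:

    #include <linux/dma-mapping.h>

    /* Illustrative ring, not the gve driver's own types. */
    struct my_ring {
    	struct my_desc {
    		__be64 addr;
    	} *desc_ring;		/* CPU-visible ring memory */
    	dma_addr_t bus;		/* matching device (DMA) address */
    	size_t bytes;
    };

    static int my_ring_alloc(struct device *dev, struct my_ring *r, u32 slots)
    {
    	r->bytes = sizeof(*r->desc_ring) * slots;
    	r->desc_ring = dma_alloc_coherent(dev, r->bytes, &r->bus, GFP_KERNEL);
    	return r->desc_ring ? 0 : -ENOMEM;
    }

    static void my_ring_free(struct device *dev, struct my_ring *r)
    {
    	if (!r->desc_ring)
    		return;		/* never allocated, or already freed */
    	dma_free_coherent(dev, r->bytes, r->desc_ring, r->bus);
    	r->desc_ring = NULL;	/* same defensive NULLing as above */
    }
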
58 static int gve_prefill_rx_pages(struct gve_rx_ring *rx) in gve_prefill_rx_pages() argument
60 struct gve_priv *priv = rx->gve; in gve_prefill_rx_pages()
64 /* Allocate one page per Rx queue slot. Each page is split into two packet buffers, when possible we "page flip" between the two. */ in gve_prefill_rx_pages()
67 slots = rx->mask + 1; in gve_prefill_rx_pages()
69 rx->data.page_info = kvzalloc(slots * in gve_prefill_rx_pages()
70 sizeof(*rx->data.page_info), GFP_KERNEL); in gve_prefill_rx_pages()
71 if (!rx->data.page_info) in gve_prefill_rx_pages()
74 rx->data.qpl = gve_assign_rx_qpl(priv); in gve_prefill_rx_pages()
77 struct page *page = rx->data.qpl->pages[i]; in gve_prefill_rx_pages()
80 gve_setup_rx_buffer(&rx->data.page_info[i], in gve_prefill_rx_pages()
81 &rx->data.data_ring[i], addr, page); in gve_prefill_rx_pages()
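
The comment above states the buffer scheme: one page per slot, split into two half-page packet buffers so the driver can later "flip" between them. A hedged sketch of what the per-slot setup plausibly records; the field and function names here are illustrative, not the driver's:

    #include <linux/mm.h>	/* PAGE_SIZE, page_address() */

    #define MY_HALF_PAGE (PAGE_SIZE / 2)

    /* Illustrative per-slot bookkeeping for the half-page scheme. */
    struct my_page_info {
    	struct page *page;
    	void *page_address;
    	u32 page_offset;	/* 0 or MY_HALF_PAGE: the half currently posted */
    };

    /* Point a slot at the first half of its page; the second half is
     * the flip target used by the page-flip receive path later on.
     */
    static void my_setup_rx_buffer(struct my_page_info *info, __be64 *slot,
    			       dma_addr_t addr, struct page *page)
    {
    	info->page = page;
    	info->page_address = page_address(page);
    	info->page_offset = 0;
    	*slot = cpu_to_be64(addr);	/* device reads addresses big-endian */
    }
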
91 struct gve_rx_ring *rx = &priv->rx[queue_idx]; in gve_rx_add_to_block() local
93 block->rx = rx; in gve_rx_add_to_block()
94 rx->ntfy_id = ntfy_idx; in gve_rx_add_to_block()
99 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring() local
106 netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); in gve_rx_alloc_ring()
108 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring()
110 rx->gve = priv; in gve_rx_alloc_ring()
111 rx->q_num = idx; in gve_rx_alloc_ring()
114 rx->mask = slots - 1; in gve_rx_alloc_ring()
116 /* alloc rx data ring */ in gve_rx_alloc_ring()
117 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
118 rx->data.data_ring = dma_alloc_coherent(hdev, bytes, in gve_rx_alloc_ring()
119 &rx->data.data_bus, in gve_rx_alloc_ring()
121 if (!rx->data.data_ring) in gve_rx_alloc_ring()
123 filled_pages = gve_prefill_rx_pages(rx); in gve_rx_alloc_ring()
128 rx->fill_cnt = filled_pages; in gve_rx_alloc_ring()
133 rx->q_resources = in gve_rx_alloc_ring()
135 sizeof(*rx->q_resources), in gve_rx_alloc_ring()
136 &rx->q_resources_bus, in gve_rx_alloc_ring()
138 if (!rx->q_resources) { in gve_rx_alloc_ring()
142 netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx, in gve_rx_alloc_ring()
143 (unsigned long)rx->data.data_bus); in gve_rx_alloc_ring()
145 /* alloc rx desc ring */ in gve_rx_alloc_ring()
153 rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, in gve_rx_alloc_ring()
155 if (!rx->desc.desc_ring) { in gve_rx_alloc_ring()
159 rx->mask = slots - 1; in gve_rx_alloc_ring()
160 rx->cnt = 0; in gve_rx_alloc_ring()
161 rx->desc.seqno = 1; in gve_rx_alloc_ring()
167 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring()
168 rx->q_resources, rx->q_resources_bus); in gve_rx_alloc_ring()
169 rx->q_resources = NULL; in gve_rx_alloc_ring()
171 kvfree(rx->data.page_info); in gve_rx_alloc_ring()
173 bytes = sizeof(*rx->data.data_ring) * slots; in gve_rx_alloc_ring()
174 dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); in gve_rx_alloc_ring()
175 rx->data.data_ring = NULL; in gve_rx_alloc_ring()
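
The tail of gve_rx_alloc_ring() above is its error path: each block unwinds exactly the allocations that succeeded before the failure, in reverse order. The overall shape is the kernel's usual goto-unwind idiom, sketched here with hypothetical helper names standing in for the real steps:

    /* Hypothetical helpers, standing in for the real allocation steps;
     * struct my_ring is the illustrative type from the sketch above.
     */
    int my_alloc_data_ring(struct device *dev, struct my_ring *r);
    int my_prefill_pages(struct my_ring *r);
    int my_alloc_q_resources(struct device *dev, struct my_ring *r);
    int my_alloc_desc_ring(struct device *dev, struct my_ring *r);
    void my_free_q_resources(struct device *dev, struct my_ring *r);
    void my_unfill_pages(struct my_ring *r);
    void my_free_data_ring(struct device *dev, struct my_ring *r);

    static int my_ring_setup(struct device *dev, struct my_ring *r)
    {
    	int err;

    	err = my_alloc_data_ring(dev, r);
    	if (err)
    		return err;			/* nothing to unwind yet */
    	err = my_prefill_pages(r);
    	if (err)
    		goto abort_with_data_ring;
    	err = my_alloc_q_resources(dev, r);
    	if (err)
    		goto abort_with_pages;
    	err = my_alloc_desc_ring(dev, r);
    	if (err)
    		goto abort_with_q_resources;
    	return 0;

    	/* Unwind in reverse order of allocation. */
    abort_with_q_resources:
    	my_free_q_resources(dev, r);
    abort_with_pages:
    	my_unfill_pages(r);
    abort_with_data_ring:
    	my_free_data_ring(dev, r);
    	return err;
    }
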
189 "Failed to alloc rx ring=%d: err=%d\n", in gve_rx_alloc_rings()
212 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) in gve_rx_write_doorbell() argument
214 u32 db_idx = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell()
216 iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); in gve_rx_write_doorbell()
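
Two endianness conversions meet in gve_rx_write_doorbell(): the device reports its doorbell index in big-endian, so be32_to_cpu() converts it before it is used as an array index, and the MMIO write must reach the device big-endian, hence iowrite32be(), which byte-swaps on little-endian CPUs. Note that fill_cnt is a free-running count of buffers posted, not a masked ring index. A self-contained restatement with illustrative struct layouts (inferred from the lines above, not copied from the driver):

    #include <linux/io.h>	/* iowrite32be() */

    struct my_priv {
    	__be32 __iomem *db_bar2;	/* doorbell BAR: big-endian registers */
    };

    struct my_rx {
    	struct { __be32 db_index; } *q_resources;	/* written by the device */
    	u32 fill_cnt;		/* free-running count of buffers posted */
    };

    static void my_write_rx_doorbell(struct my_priv *priv, struct my_rx *rx)
    {
    	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

    	/* iowrite32be() byte-swaps on little-endian CPUs, so the
    	 * device always sees a big-endian value.
    	 */
    	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
    }
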
228 static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx, in gve_rx_copy() argument
247 u64_stats_update_begin(&rx->statss); in gve_rx_copy()
248 rx->rx_copied_pkt++; in gve_rx_copy()
249 u64_stats_update_end(&rx->statss); in gve_rx_copy()
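
The statss field used above is a struct u64_stats_sync: the update_begin/update_end pair version-protects the 64-bit counters so that on 32-bit kernels a concurrent reader never observes a torn value (on 64-bit kernels the pair compiles to almost nothing). The matching reader side of this kernel API looks like the sketch below; the exact helper the driver's stats path uses may differ:

    #include <linux/u64_stats_sync.h>
    #include "gve.h"	/* struct gve_rx_ring, per the lines above */

    static u64 my_read_rx_copied(const struct gve_rx_ring *rx)
    {
    	unsigned int start;
    	u64 val;

    	do {
    		start = u64_stats_fetch_begin(&rx->statss);
    		val = rx->rx_copied_pkt;	/* snapshot the counter */
    	} while (u64_stats_fetch_retry(&rx->statss, start));

    	return val;
    }
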
281 static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, in gve_rx() argument
285 struct gve_priv *priv = rx->gve; in gve_rx()
286 struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; in gve_rx()
294 u64_stats_update_begin(&rx->statss); in gve_rx()
295 rx->rx_desc_err_dropped_pkt++; in gve_rx()
296 u64_stats_update_end(&rx->statss); in gve_rx()
301 page_info = &rx->data.page_info[idx]; in gve_rx()
302 dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx], in gve_rx()
313 skb = gve_rx_copy(rx, dev, napi, page_info, len); in gve_rx()
314 u64_stats_update_begin(&rx->statss); in gve_rx()
315 rx->rx_copybreak_pkt++; in gve_rx()
316 u64_stats_update_end(&rx->statss); in gve_rx()
320 skb = gve_rx_copy(rx, dev, napi, page_info, len); in gve_rx()
331 u64_stats_update_begin(&rx->statss); in gve_rx()
332 rx->rx_skb_alloc_fail++; in gve_rx()
333 u64_stats_update_end(&rx->statss); in gve_rx()
339 gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]); in gve_rx()
344 skb = gve_rx_copy(rx, dev, napi, page_info, len); in gve_rx()
350 skb = gve_rx_copy(rx, dev, napi, page_info, len); in gve_rx()
358 u64_stats_update_begin(&rx->statss); in gve_rx()
359 rx->rx_skb_alloc_fail++; in gve_rx()
360 u64_stats_update_end(&rx->statss); in gve_rx()
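
The gve_rx() lines above show the receive path's two strategies, which the copybreak and copied-packet counters track: packets at or below a copybreak threshold are copied into a fresh skb, leaving the half-page buffer in place for immediate reuse, while larger packets are handed to the stack as page frags and the descriptor is "flipped" to the other half of the page. A hedged sketch of that branch; MY_RX_COPYBREAK, my_rx_add_frags, and the page-info type name are assumptions, and the real function also handles cases where flipping is not possible:

    #define MY_RX_COPYBREAK 256	/* illustrative threshold */

    /* Hypothetical frag-attach helper. */
    struct sk_buff *my_rx_add_frags(struct napi_struct *napi,
    				struct gve_rx_slot_page_info *page_info,
    				u16 len);

    static struct sk_buff *my_rx_build_skb(struct gve_rx_ring *rx,
    				       struct net_device *dev,
    				       struct napi_struct *napi,
    				       struct gve_rx_slot_page_info *page_info,
    				       u16 len, u32 idx)
    {
    	struct sk_buff *skb;

    	if (len <= MY_RX_COPYBREAK) {
    		/* Small packet: copy into a fresh skb and reuse the
    		 * ring buffer immediately.
    		 */
    		skb = gve_rx_copy(rx, dev, napi, page_info, len);
    		u64_stats_update_begin(&rx->statss);
    		rx->rx_copybreak_pkt++;
    		u64_stats_update_end(&rx->statss);
    	} else {
    		/* Large packet: attach the live half-page as a frag,
    		 * then point the descriptor at the other half.
    		 */
    		skb = my_rx_add_frags(napi, page_info, len);
    		if (skb)
    			gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]);
    	}
    	return skb;
    }
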
386 static bool gve_rx_work_pending(struct gve_rx_ring *rx) in gve_rx_work_pending() argument
392 next_idx = rx->cnt & rx->mask; in gve_rx_work_pending()
393 desc = rx->desc.desc_ring + next_idx; in gve_rx_work_pending()
399 return (GVE_SEQNO(flags_seq) == rx->desc.seqno); in gve_rx_work_pending()
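
Descriptor ownership is decided by a small sequence number the device writes into flags_seq: the driver tracks the value it expects next (rx->desc.seqno, initialized to 1 above), and a slot is ready only when the two match, so stale descriptors from the previous lap around the ring are never consumed. Assuming, as the code above suggests, a 3-bit seqno that cycles 1..7 and never takes the value 0, the mechanics look like this (the MY_* names are illustrative):

    #define MY_SEQNO_MASK 0x7
    #define MY_SEQNO(flags_seq) ((flags_seq) & MY_SEQNO_MASK)

    static inline unsigned char my_next_seqno(unsigned char seq)
    {
    	return (seq + 1) == 8 ? 1 : seq + 1;	/* skip 0 on wrap */
    }

    /* A descriptor belongs to the driver only when the seqno the device
     * wrote into it matches the seqno the driver expects next; otherwise
     * the slot still holds a stale descriptor from a previous lap.
     */
    static inline int my_desc_ready(unsigned char flags_seq,
    				unsigned char expected)
    {
    	return MY_SEQNO(flags_seq) == expected;
    }
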
402 bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, in gve_clean_rx_done() argument
405 struct gve_priv *priv = rx->gve; in gve_clean_rx_done()
407 u32 cnt = rx->cnt; in gve_clean_rx_done()
408 u32 idx = cnt & rx->mask; in gve_clean_rx_done()
412 desc = rx->desc.desc_ring + idx; in gve_clean_rx_done()
413 while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && in gve_clean_rx_done()
417 rx->q_num, idx, desc, desc->flags_seq); in gve_clean_rx_done()
419 "[%d] seqno=%d rx->desc.seqno=%d\n", in gve_clean_rx_done()
420 rx->q_num, GVE_SEQNO(desc->flags_seq), in gve_clean_rx_done()
421 rx->desc.seqno); in gve_clean_rx_done()
423 if (!gve_rx(rx, desc, feat, idx)) in gve_clean_rx_done()
426 idx = cnt & rx->mask; in gve_clean_rx_done()
427 desc = rx->desc.desc_ring + idx; in gve_clean_rx_done()
428 rx->desc.seqno = gve_next_seqno(rx->desc.seqno); in gve_clean_rx_done()
435 u64_stats_update_begin(&rx->statss); in gve_clean_rx_done()
436 rx->rpackets += work_done; in gve_clean_rx_done()
437 rx->rbytes += bytes; in gve_clean_rx_done()
438 u64_stats_update_end(&rx->statss); in gve_clean_rx_done()
439 rx->cnt = cnt; in gve_clean_rx_done()
440 rx->fill_cnt += work_done; in gve_clean_rx_done()
442 gve_rx_write_doorbell(priv, rx); in gve_clean_rx_done()
443 return gve_rx_work_pending(rx); in gve_clean_rx_done()
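
Both cnt and fill_cnt in gve_clean_rx_done() are free-running 32-bit counters that are never reduced modulo the ring size; the slot index is always re-derived as cnt & rx->mask, which is why the ring length must be a power of two. A tiny standalone model of that invariant, including wraparound:

    #include <stdint.h>
    #include <assert.h>

    struct my_ring_idx {
    	uint32_t cnt;	/* descriptors consumed, never masked */
    	uint32_t mask;	/* slots - 1; slots must be a power of two */
    };

    int main(void)
    {
    	struct my_ring_idx r = { .cnt = UINT32_MAX - 1, .mask = 1024 - 1 };

    	/* The index stays in [0, slots) even as cnt wraps past 2^32. */
    	for (int i = 0; i < 4; i++) {
    		uint32_t idx = r.cnt & r.mask;

    		assert(idx < r.mask + 1);
    		r.cnt++;	/* unsigned wraparound is well defined */
    	}
    	return 0;
    }
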
448 struct gve_rx_ring *rx = block->rx; in gve_rx_poll() local
459 repoll |= gve_clean_rx_done(rx, budget, feat); in gve_rx_poll()
461 repoll |= gve_rx_work_pending(rx); in gve_rx_poll()
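
gve_rx_poll() ties the pieces together in the usual NAPI shape: with a positive budget it cleans completed descriptors, otherwise it only reports whether work is pending, and the returned repoll flag tells the caller whether the queue should stay scheduled. A sketch of that structure, assuming the block/napi field names implied by the lines above:

    static bool my_rx_poll(struct gve_notify_block *block, int budget)
    {
    	struct gve_rx_ring *rx = block->rx;
    	netdev_features_t feat = block->napi.dev->features;
    	bool repoll = false;

    	if (budget > 0)
    		/* Consume up to "budget" descriptors. */
    		repoll |= gve_clean_rx_done(rx, budget, feat);
    	else
    		/* Zero budget: only check whether work is pending. */
    		repoll |= gve_rx_work_pending(rx);

    	return repoll;
    }
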