Lines matching "tx" (full-text search) in the gve driver's GQI Tx path (gve_tx.c)
26 struct gve_tx_ring *tx = &priv->tx[tx_qid]; in gve_xdp_tx_flush() local
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_tx_flush()
32 * We copy skb payloads into the registered segment before writing Tx
33 * descriptors and ringing the Tx doorbell.
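The two comment lines above (32-33) describe the GQI copy-mode (QPL) transmit path that most of the matched lines below implement. A minimal sketch of that sequence, condensed from those fragments; the helper names come from the matches, the function name itself is hypothetical, and descriptor setup, FIFO wrap handling, and error paths are elided:

	/* Sketch only, not the driver's actual function: copy-mode transmit,
	 * assuming the payload fits a single FIFO fragment (no wrap). */
	static void gve_tx_copy_sketch(struct gve_priv *priv,
				       struct gve_tx_ring *tx,
				       struct sk_buff *skb)
	{
		struct gve_tx_buffer_state *info = &tx->info[tx->req & tx->mask];
		int nfrags;

		/* 1. Reserve space in the pre-registered Tx FIFO (the QPL). */
		nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len, info->iov);

		/* 2. Copy the skb payload into the registered segment and make
		 *    it visible to the device. */
		skb_copy_bits(skb, 0,
			      tx->tx_fifo.base + info->iov[0].iov_offset,
			      skb->len);
		gve_dma_sync_for_device(&priv->pdev->dev,
					tx->tx_fifo.qpl->page_buses,
					info->iov[0].iov_offset, skb->len);

		/* 3. Write the Tx descriptor(s) at tx->req & tx->mask (elided),
		 *    then advance req and ring the Tx doorbell. */
		tx->req += nfrags;
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	}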
74 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
135 /* gve_tx_free_fifo - Return space to Tx FIFO
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_xdp_done() argument
168 idx = tx->done & tx->mask; in gve_clean_xdp_done()
169 info = &tx->info[idx]; in gve_clean_xdp_done()
170 tx->done++; in gve_clean_xdp_done()
187 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_xdp_done()
188 if (xsk_complete > 0 && tx->xsk_pool) in gve_clean_xdp_done()
189 xsk_tx_completed(tx->xsk_pool, xsk_complete); in gve_clean_xdp_done()
190 u64_stats_update_begin(&tx->statss); in gve_clean_xdp_done()
191 tx->bytes_done += bytes; in gve_clean_xdp_done()
192 tx->pkt_done += pkts; in gve_clean_xdp_done()
193 u64_stats_update_end(&tx->statss); in gve_clean_xdp_done()
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
203 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_stop_ring_gqi() local
209 if (tx->q_num < priv->tx_cfg.num_queues) in gve_tx_stop_ring_gqi()
210 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false); in gve_tx_stop_ring_gqi()
212 gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt); in gve_tx_stop_ring_gqi()
213 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_stop_ring_gqi()
217 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_free_ring_gqi() argument
221 int idx = tx->q_num; in gve_tx_free_ring_gqi()
226 slots = tx->mask + 1; in gve_tx_free_ring_gqi()
227 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring_gqi()
228 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring_gqi()
229 tx->q_resources = NULL; in gve_tx_free_ring_gqi()
231 if (tx->tx_fifo.qpl) { in gve_tx_free_ring_gqi()
232 if (tx->tx_fifo.base) in gve_tx_free_ring_gqi()
233 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring_gqi()
235 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_free_ring_gqi()
236 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_free_ring_gqi()
237 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring_gqi()
240 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring_gqi()
241 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring_gqi()
242 tx->desc = NULL; in gve_tx_free_ring_gqi()
244 vfree(tx->info); in gve_tx_free_ring_gqi()
245 tx->info = NULL; in gve_tx_free_ring_gqi()
247 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring_gqi()
253 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_start_ring_gqi() local
257 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_start_ring_gqi()
263 struct gve_tx_ring *tx, in gve_tx_alloc_ring_gqi() argument
272 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring_gqi()
273 spin_lock_init(&tx->clean_lock); in gve_tx_alloc_ring_gqi()
274 spin_lock_init(&tx->xdp_lock); in gve_tx_alloc_ring_gqi()
275 tx->q_num = idx; in gve_tx_alloc_ring_gqi()
277 tx->mask = cfg->ring_size - 1; in gve_tx_alloc_ring_gqi()
280 tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info)); in gve_tx_alloc_ring_gqi()
281 if (!tx->info) in gve_tx_alloc_ring_gqi()
284 /* alloc tx queue */ in gve_tx_alloc_ring_gqi()
285 bytes = sizeof(*tx->desc) * cfg->ring_size; in gve_tx_alloc_ring_gqi()
286 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring_gqi()
287 if (!tx->desc) in gve_tx_alloc_ring_gqi()
290 tx->raw_addressing = cfg->raw_addressing; in gve_tx_alloc_ring_gqi()
291 tx->dev = hdev; in gve_tx_alloc_ring_gqi()
292 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
293 qpl_id = gve_tx_qpl_id(priv, tx->q_num); in gve_tx_alloc_ring_gqi()
296 tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id, in gve_tx_alloc_ring_gqi()
298 if (!tx->tx_fifo.qpl) in gve_tx_alloc_ring_gqi()
301 /* map Tx FIFO */ in gve_tx_alloc_ring_gqi()
302 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring_gqi()
306 tx->q_resources = in gve_tx_alloc_ring_gqi()
308 sizeof(*tx->q_resources), in gve_tx_alloc_ring_gqi()
309 &tx->q_resources_bus, in gve_tx_alloc_ring_gqi()
311 if (!tx->q_resources) in gve_tx_alloc_ring_gqi()
317 if (!tx->raw_addressing) in gve_tx_alloc_ring_gqi()
318 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring_gqi()
320 if (!tx->raw_addressing) { in gve_tx_alloc_ring_gqi()
321 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); in gve_tx_alloc_ring_gqi()
322 tx->tx_fifo.qpl = NULL; in gve_tx_alloc_ring_gqi()
325 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring_gqi()
326 tx->desc = NULL; in gve_tx_alloc_ring_gqi()
328 vfree(tx->info); in gve_tx_alloc_ring_gqi()
329 tx->info = NULL; in gve_tx_alloc_ring_gqi()
336 struct gve_tx_ring *tx = cfg->tx; in gve_tx_alloc_rings_gqi() local
344 "Cannot alloc more than the max num of Tx rings\n"); in gve_tx_alloc_rings_gqi()
348 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), in gve_tx_alloc_rings_gqi()
350 if (!tx) in gve_tx_alloc_rings_gqi()
354 err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i); in gve_tx_alloc_rings_gqi()
357 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings_gqi()
363 cfg->tx = tx; in gve_tx_alloc_rings_gqi()
368 gve_tx_free_ring_gqi(priv, &tx[j], cfg); in gve_tx_alloc_rings_gqi()
369 kvfree(tx); in gve_tx_alloc_rings_gqi()
376 struct gve_tx_ring *tx = cfg->tx; in gve_tx_free_rings_gqi() local
379 if (!tx) in gve_tx_free_rings_gqi()
383 gve_tx_free_ring_gqi(priv, &tx[i], cfg); in gve_tx_free_rings_gqi()
385 kvfree(tx); in gve_tx_free_rings_gqi()
386 cfg->tx = NULL; in gve_tx_free_rings_gqi()
390 * @tx: tx ring to check
396 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
398 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
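The one-liner above (398) derives free descriptor slots from the free-running req/done counters and the power-of-two mask set at line 277. A worked example with illustrative (assumed) values:

	/* Worked example, not driver code: a 1024-entry ring has
	 * tx->mask = 1023.  With req = 5000 descriptors posted and
	 * done = 4700 completed, 300 slots are still in flight, so
	 * avail = (mask + 1) - (req - done) = 1024 - 300 = 724.
	 * Because req and done are free-running unsigned counters, the
	 * subtraction stays correct across wraparound. */
	u32 avail = (1023u + 1) - (5000u - 4700u);	/* == 724 */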
401 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
411 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
446 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
450 if (!tx->raw_addressing) in gve_can_tx()
451 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); in gve_can_tx()
453 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); in gve_can_tx()
459 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_maybe_stop_tx() argument
467 if (!tx->raw_addressing) in gve_maybe_stop_tx()
468 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
470 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
474 spin_lock(&tx->clean_lock); in gve_maybe_stop_tx()
475 nic_done = gve_tx_load_event_counter(priv, tx); in gve_maybe_stop_tx()
476 to_do = nic_done - tx->done; in gve_maybe_stop_tx()
478 /* Only try to clean if there is hope for TX */ in gve_maybe_stop_tx()
479 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) { in gve_maybe_stop_tx()
482 gve_clean_tx_done(priv, tx, to_do, false); in gve_maybe_stop_tx()
484 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
489 tx->stop_queue++; in gve_maybe_stop_tx()
490 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
492 spin_unlock(&tx->clean_lock); in gve_maybe_stop_tx()
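Lines 459-492 above carry the slow path of gve_maybe_stop_tx(): when descriptor or FIFO space runs short, it takes clean_lock, reads the NIC's event counter, reclaims finished descriptors, and only stops the queue if that still is not enough. A condensed sketch of that control flow, assembled from the fragments (the -EBUSY "queue stopped" return value is an assumption):

	u32 nic_done, to_do;
	int ret = -EBUSY;

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;			/* fast path: enough room */

	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;		/* completions not yet reaped */

	/* Only try to clean if reclaiming could free enough descriptors. */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		gve_clean_tx_done(priv, tx, to_do, false);
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);
	return ret;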
562 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) in gve_tx_add_skb_copy() argument
569 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_copy()
575 info = &tx->info[idx]; in gve_tx_add_skb_copy()
576 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_copy()
589 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb_copy()
590 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb_copy()
593 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb_copy()
602 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb_copy()
604 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
610 next_idx = (tx->req + 1) & tx->mask; in gve_tx_add_skb_copy()
611 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb); in gve_tx_add_skb_copy()
615 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask; in gve_tx_add_skb_copy()
616 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb_copy()
625 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb_copy()
627 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb_copy()
636 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_add_skb_no_copy() argument
645 u32 idx = tx->req & tx->mask; in gve_tx_add_skb_no_copy()
650 info = &tx->info[idx]; in gve_tx_add_skb_no_copy()
651 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
664 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
665 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
666 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
683 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
684 mtd_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
694 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
695 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
704 idx = (idx + 1) & tx->mask; in gve_tx_add_skb_no_copy()
705 seg_desc = &tx->desc[idx]; in gve_tx_add_skb_no_copy()
707 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); in gve_tx_add_skb_no_copy()
708 if (unlikely(dma_mapping_error(tx->dev, addr))) { in gve_tx_add_skb_no_copy()
709 tx->dma_mapping_error++; in gve_tx_add_skb_no_copy()
712 tx->info[idx].skb = NULL; in gve_tx_add_skb_no_copy()
713 dma_unmap_len_set(&tx->info[idx], len, len); in gve_tx_add_skb_no_copy()
714 dma_unmap_addr_set(&tx->info[idx], dma, addr); in gve_tx_add_skb_no_copy()
730 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); in gve_tx_add_skb_no_copy()
733 tx->dropped_pkt++; in gve_tx_add_skb_no_copy()
740 struct gve_tx_ring *tx; in gve_tx() local
745 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
746 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) { in gve_tx()
747 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
752 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
755 if (tx->raw_addressing) in gve_tx()
756 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); in gve_tx()
758 nsegs = gve_tx_add_skb_copy(priv, tx, skb); in gve_tx()
762 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
764 tx->req += nsegs; in gve_tx()
769 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
775 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
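Lines 769 and 775 above show doorbell coalescing at the end of gve_tx(): the MMIO doorbell write is skipped when the stack signals that more packets are queued behind this one and the queue has not been stopped. The pattern as a sketch (NETDEV_TX_OK is the normal ndo_start_xmit success return):

	/* Doorbell coalescing at the end of gve_tx() (sketch). */
	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;	/* more skbs coming: batch the doorbell */

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;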
779 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_tx_fill_xdp() argument
784 u32 reqi = tx->req; in gve_tx_fill_xdp()
786 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len); in gve_tx_fill_xdp()
789 info = &tx->info[reqi & tx->mask]; in gve_tx_fill_xdp()
794 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len, in gve_tx_fill_xdp()
802 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0, in gve_tx_fill_xdp()
807 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask], in gve_tx_fill_xdp()
812 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset, in gve_tx_fill_xdp()
815 tx->tx_fifo.qpl->page_buses, in gve_tx_fill_xdp()
830 struct gve_tx_ring *tx; in gve_xdp_xmit() local
842 tx = &priv->tx[qid]; in gve_xdp_xmit()
844 spin_lock(&tx->xdp_lock); in gve_xdp_xmit()
846 err = gve_xdp_xmit_one(priv, tx, frames[i]->data, in gve_xdp_xmit()
853 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xdp_xmit()
855 spin_unlock(&tx->xdp_lock); in gve_xdp_xmit()
857 u64_stats_update_begin(&tx->statss); in gve_xdp_xmit()
858 tx->xdp_xmit += n; in gve_xdp_xmit()
859 tx->xdp_xmit_errors += n - i; in gve_xdp_xmit()
860 u64_stats_update_end(&tx->statss); in gve_xdp_xmit()
865 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xdp_xmit_one() argument
870 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1)) in gve_xdp_xmit_one()
873 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false); in gve_xdp_xmit_one()
874 tx->req += nsegs; in gve_xdp_xmit_one()
881 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
892 idx = tx->done & tx->mask; in gve_clean_tx_done()
895 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
896 info = &tx->info[idx]; in gve_clean_tx_done()
900 if (tx->raw_addressing) in gve_clean_tx_done()
901 gve_tx_unmap_buf(tx->dev, info); in gve_clean_tx_done()
902 tx->done++; in gve_clean_tx_done()
909 if (tx->raw_addressing) in gve_clean_tx_done()
915 if (!tx->raw_addressing) in gve_clean_tx_done()
916 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
917 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
918 tx->bytes_done += bytes; in gve_clean_tx_done()
919 tx->pkt_done += pkts; in gve_clean_tx_done()
920 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
921 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
928 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
929 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
930 tx->wake_queue++; in gve_clean_tx_done()
931 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
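The completion path above (881-931) closes the byte-queue-limits accounting opened at transmit time and re-wakes a queue it previously stopped. A comment-only summary of how the matched fragments pair up:

	/* BQL / queue-state pairing across the matched fragments:
	 *   submit     (line 762): netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	 *   completion (line 921): netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
	 *   teardown   (line 213): netdev_tx_reset_queue(tx->netdev_txq);
	 * The wake check (lines 928-931) restarts a stopped queue only once
	 * gve_can_tx(tx, GVE_TX_START_THRESH) reports room again. */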
938 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
940 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index); in gve_tx_load_event_counter()
946 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_xsk_tx() argument
953 spin_lock(&tx->xdp_lock); in gve_xsk_tx()
955 if (!gve_can_tx(tx, GVE_TX_START_THRESH) || in gve_xsk_tx()
956 !xsk_tx_peek_desc(tx->xsk_pool, &desc)) in gve_xsk_tx()
959 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); in gve_xsk_tx()
960 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); in gve_xsk_tx()
961 tx->req += nsegs; in gve_xsk_tx()
966 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_xsk_tx()
967 xsk_tx_release(tx->xsk_pool); in gve_xsk_tx()
969 spin_unlock(&tx->xdp_lock); in gve_xsk_tx()
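gve_xsk_tx() (lines 946-969 above) drains the AF_XDP pool under xdp_lock and rings one doorbell for the whole batch; completions are reported back to the pool later via xsk_tx_completed() in gve_clean_xdp_done() (line 189). A condensed sketch of the loop using the same xsk driver API as the matches, with the budget and sent bookkeeping simplified:

	/* Sketch of the AF_XDP zero-copy TX loop in gve_xsk_tx(). */
	spin_lock(&tx->xdp_lock);
	while (sent < budget) {
		if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
		    !xsk_tx_peek_desc(tx->xsk_pool, &desc))
			break;

		data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
		tx->req += gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
		sent++;
	}
	if (sent) {
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		xsk_tx_release(tx->xsk_pool);	/* publish consumption of the peeked descriptors */
	}
	spin_unlock(&tx->xdp_lock);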
977 struct gve_tx_ring *tx; in gve_xsk_tx_poll() local
980 tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)]; in gve_xsk_tx_poll()
981 if (tx->xsk_pool) { in gve_xsk_tx_poll()
982 sent = gve_xsk_tx(priv, tx, budget); in gve_xsk_tx_poll()
984 u64_stats_update_begin(&tx->statss); in gve_xsk_tx_poll()
985 tx->xdp_xsk_sent += sent; in gve_xsk_tx_poll()
986 u64_stats_update_end(&tx->statss); in gve_xsk_tx_poll()
987 if (xsk_uses_need_wakeup(tx->xsk_pool)) in gve_xsk_tx_poll()
988 xsk_set_tx_need_wakeup(tx->xsk_pool); in gve_xsk_tx_poll()
997 struct gve_tx_ring *tx = block->tx; in gve_xdp_poll() local
1002 nic_done = gve_tx_load_event_counter(priv, tx); in gve_xdp_poll()
1003 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_xdp_poll()
1004 gve_clean_xdp_done(priv, tx, to_do); in gve_xdp_poll()
1007 return nic_done != tx->done; in gve_xdp_poll()
1013 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
1021 /* In TX path, it may try to clean completed pkts in order to xmit, in gve_tx_poll()
1025 spin_lock(&tx->clean_lock); in gve_tx_poll()
1027 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
1028 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
1029 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
1030 spin_unlock(&tx->clean_lock); in gve_tx_poll()
1032 return nic_done != tx->done; in gve_tx_poll()
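Both poll routines above report whether work remains: gve_xdp_poll() (line 1007) and gve_tx_poll() (line 1032) compare the device's event counter against tx->done so the caller can decide whether to keep polling. As a one-line note:

	/* repoll = (nic_done != tx->done): the NIC has completed descriptors
	 * the driver has not yet cleaned, so the poll loop should run again. */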
1035 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx) in gve_tx_clean_pending() argument
1037 u32 nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_clean_pending()
1039 return nic_done != tx->done; in gve_tx_clean_pending()