Lines matching defs:tx (the struct gve_tx_ring *tx pointer)
26 struct gve_tx_ring *tx = &priv->tx[tx_qid];
28 gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
157 static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
168 idx = tx->done & tx->mask;
169 info = &tx->info[idx];
170 tx->done++;
187 gve_tx_free_fifo(&tx->tx_fifo, space_freed);
188 if (xsk_complete > 0 && tx->xsk_pool)
189 xsk_tx_completed(tx->xsk_pool, xsk_complete);
190 u64_stats_update_begin(&tx->statss);
191 tx->bytes_done += bytes;
192 tx->pkt_done += pkts;
193 u64_stats_update_end(&tx->statss);
197 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
203 struct gve_tx_ring *tx = &priv->tx[idx];
209 if (tx->q_num < priv->tx_cfg.num_queues)
210 gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
212 gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
213 netdev_tx_reset_queue(tx->netdev_txq);
217 static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
221 int idx = tx->q_num;
226 slots = tx->mask + 1;
227 dma_free_coherent(hdev, sizeof(*tx->q_resources),
228 tx->q_resources, tx->q_resources_bus);
229 tx->q_resources = NULL;
231 if (tx->tx_fifo.qpl) {
232 if (tx->tx_fifo.base)
233 gve_tx_fifo_release(priv, &tx->tx_fifo);
235 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
236 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
237 tx->tx_fifo.qpl = NULL;
240 bytes = sizeof(*tx->desc) * slots;
241 dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
242 tx->desc = NULL;
244 vfree(tx->info);
245 tx->info = NULL;
247 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
253 struct gve_tx_ring *tx = &priv->tx[idx];
257 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
263 struct gve_tx_ring *tx,
272 memset(tx, 0, sizeof(*tx));
273 spin_lock_init(&tx->clean_lock);
274 spin_lock_init(&tx->xdp_lock);
275 tx->q_num = idx;
277 tx->mask = cfg->ring_size - 1;
280 tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
281 if (!tx->info)
284 /* alloc tx queue */
285 bytes = sizeof(*tx->desc) * cfg->ring_size;
286 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
287 if (!tx->desc)
290 tx->raw_addressing = cfg->raw_addressing;
291 tx->dev = hdev;
292 if (!tx->raw_addressing) {
293 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
296 tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
298 if (!tx->tx_fifo.qpl)
302 if (gve_tx_fifo_init(priv, &tx->tx_fifo))
306 tx->q_resources =
308 sizeof(*tx->q_resources),
309 &tx->q_resources_bus,
311 if (!tx->q_resources)
317 if (!tx->raw_addressing)
318 gve_tx_fifo_release(priv, &tx->tx_fifo);
320 if (!tx->raw_addressing) {
321 gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
322 tx->tx_fifo.qpl = NULL;
325 dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
326 tx->desc = NULL;
328 vfree(tx->info);
329 tx->info = NULL;
336 struct gve_tx_ring *tx = cfg->tx;
348 tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
350 if (!tx)
354 err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
357 "Failed to alloc tx ring=%d: err=%d\n",
363 cfg->tx = tx;
368 gve_tx_free_ring_gqi(priv, &tx[j], cfg);
369 kvfree(tx);
376 struct gve_tx_ring *tx = cfg->tx;
379 if (!tx)
383 gve_tx_free_ring_gqi(priv, &tx[i], cfg);
385 kvfree(tx);
386 cfg->tx = NULL;
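
The allocation/teardown pair above (lines 336-386) follows a standard allocate-then-unwind shape: allocate the ring array, initialize rings one at a time, and on failure free only the rings that were already set up. A minimal stand-alone C sketch of that shape, using placeholder ring_init()/ring_fini() helpers rather than the driver's real functions:

#include <stdlib.h>

struct ring_state { void *desc; };          /* stand-in for struct gve_tx_ring */

static int ring_init(struct ring_state *r)  { r->desc = malloc(64); return r->desc ? 0 : -1; }
static void ring_fini(struct ring_state *r) { free(r->desc); r->desc = NULL; }

static struct ring_state *alloc_rings(int n)
{
	struct ring_state *rings = calloc(n, sizeof(*rings));   /* kvcalloc() analogue */

	if (!rings)
		return NULL;

	for (int i = 0; i < n; i++) {
		if (ring_init(&rings[i]) != 0) {
			for (int j = 0; j < i; j++)     /* unwind only what succeeded */
				ring_fini(&rings[j]);
			free(rings);                    /* kvfree() analogue */
			return NULL;
		}
	}
	return rings;
}
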
390 * @tx: tx ring to check
396 static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
398 return tx->mask + 1 - (tx->req - tx->done);
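
gve_tx_avail() above relies on free-running 32-bit producer/consumer counters (tx->req, tx->done) and a power-of-two ring size, so the subtraction stays correct across counter wraparound. A stand-alone illustration of that arithmetic (not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t req;   /* descriptors posted so far (free-running) */
	uint32_t done;  /* descriptors completed so far (free-running) */
	uint32_t mask;  /* ring_size - 1, ring_size a power of two */
};

/* Same formula as gve_tx_avail(): slots still free in the ring. */
static uint32_t ring_avail(const struct ring *r)
{
	return r->mask + 1 - (r->req - r->done);
}

int main(void)
{
	/* 8 descriptors outstanding in a 256-entry ring, right where req is
	 * about to wrap past UINT32_MAX: avail is still 248. */
	struct ring r = { .req = UINT32_MAX - 2, .done = UINT32_MAX - 10, .mask = 255 };

	assert(ring_avail(&r) == 248);
	printf("avail = %u\n", ring_avail(&r));
	return 0;
}
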
401 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
411 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
446 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
450 if (!tx->raw_addressing)
451 can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);
453 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
459 static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
467 if (!tx->raw_addressing)
468 bytes_required = gve_skb_fifo_bytes_required(tx, skb);
470 if (likely(gve_can_tx(tx, bytes_required)))
474 spin_lock(&tx->clean_lock);
475 nic_done = gve_tx_load_event_counter(priv, tx);
476 to_do = nic_done - tx->done;
479 if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
482 gve_clean_tx_done(priv, tx, to_do, false);
484 if (likely(gve_can_tx(tx, bytes_required)))
489 tx->stop_queue++;
490 netif_tx_stop_queue(tx->netdev_txq);
492 spin_unlock(&tx->clean_lock);
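
gve_maybe_stop_tx() (lines 459-492) tries the cheap availability check first and only takes clean_lock to reclaim completed descriptors when the ring looks full; if even a full reclaim cannot yield enough slots, the queue is stopped. A simplified stand-alone model of that decision, ignoring the copy-mode FIFO byte check (MAX_DESC_NEEDED below is an illustrative stand-in, not the driver's constant):

#include <stdbool.h>
#include <stdint.h>

#define MAX_DESC_NEEDED 8U

struct txq_model {
	uint32_t req, done, mask;       /* free-running counters, mask = ring_size - 1 */
	uint32_t nic_done;              /* latest completion count read from the device */
};

static uint32_t txq_avail(const struct txq_model *q)
{
	return q->mask + 1 - (q->req - q->done);
}

/* Returns true if transmission may proceed, false if the queue should stop. */
static bool maybe_stop(struct txq_model *q)
{
	uint32_t to_do;

	if (txq_avail(q) >= MAX_DESC_NEEDED)
		return true;                            /* fast path: no cleanup needed */

	to_do = q->nic_done - q->done;              /* completions not yet reclaimed */
	if (to_do + txq_avail(q) < MAX_DESC_NEEDED)
		return false;                           /* cleanup cannot help: stop queue */

	q->done += to_do;                           /* reclaim (per-entry work elided) */
	return txq_avail(q) >= MAX_DESC_NEEDED;     /* re-check after cleanup */
}
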
562 static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
569 u32 idx = tx->req & tx->mask;
575 info = &tx->info[idx];
576 pkt_desc = &tx->desc[idx];
589 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
590 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
593 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
602 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
604 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
610 next_idx = (tx->req + 1) & tx->mask;
611 gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
615 next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
616 seg_desc = &tx->desc[next_idx];
625 tx->tx_fifo.base + info->iov[i].iov_offset,
627 gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
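
The copy path above (gve_tx_add_skb_copy, lines 562 onward) stages packet bytes in a per-queue byte FIFO; gve_tx_alloc_fifo() hands back one or two iov fragments because an allocation may straddle the wrap point of the circular buffer. A toy stand-alone model of that split, with no free-space accounting (unlike the real FIFO) and assuming bytes <= f->size:

#include <stdint.h>

struct fifo_frag { uint32_t offset, len; };

struct byte_fifo {
	uint32_t size;   /* total bytes in the backing buffer */
	uint32_t head;   /* next free offset */
};

/* Allocate `bytes` from the circular buffer; fills iov[] and returns the
 * number of fragments used (1, or 2 when the request wraps). */
static int fifo_alloc(struct byte_fifo *f, uint32_t bytes, struct fifo_frag iov[2])
{
	uint32_t before_wrap = f->size - f->head;

	if (bytes <= before_wrap) {
		iov[0] = (struct fifo_frag){ .offset = f->head, .len = bytes };
		f->head = (f->head + bytes) % f->size;
		return 1;
	}

	iov[0] = (struct fifo_frag){ .offset = f->head, .len = before_wrap };
	iov[1] = (struct fifo_frag){ .offset = 0, .len = bytes - before_wrap };
	f->head = bytes - before_wrap;
	return 2;
}
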
636 static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
645 u32 idx = tx->req & tx->mask;
650 info = &tx->info[idx];
651 pkt_desc = &tx->desc[idx];
664 addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
665 if (unlikely(dma_mapping_error(tx->dev, addr))) {
666 tx->dma_mapping_error++;
683 idx = (idx + 1) & tx->mask;
684 mtd_desc = &tx->desc[idx];
694 idx = (idx + 1) & tx->mask;
695 seg_desc = &tx->desc[idx];
704 idx = (idx + 1) & tx->mask;
705 seg_desc = &tx->desc[idx];
707 addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
708 if (unlikely(dma_mapping_error(tx->dev, addr))) {
709 tx->dma_mapping_error++;
712 tx->info[idx].skb = NULL;
713 dma_unmap_len_set(&tx->info[idx], len, len);
714 dma_unmap_addr_set(&tx->info[idx], dma, addr);
730 gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
733 tx->dropped_pkt++;
740 struct gve_tx_ring *tx;
745 tx = &priv->tx[skb_get_queue_mapping(skb)];
746 if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
752 gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
755 if (tx->raw_addressing)
756 nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
758 nsegs = gve_tx_add_skb_copy(priv, tx, skb);
762 netdev_tx_sent_queue(tx->netdev_txq, skb->len);
764 tx->req += nsegs;
769 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
775 gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
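
The transmit path ends (lines 769-775) with a doorbell-batching optimization: the producer index is only written to the device when the stack reports no further packets pending (netdev_xmit_more()), so a burst of skbs shares one MMIO write. A schematic stand-alone sketch of that idea, with ring_doorbell() as a placeholder for the real register write:

#include <stdbool.h>
#include <stdint.h>

struct txq { uint32_t req; };

/* Advance the producer index for a filled packet and ring the doorbell only
 * when this is the last packet of the batch (or the queue just stopped). */
static void finish_xmit(struct txq *q, uint32_t nsegs, bool more_pending,
			bool queue_stopped, void (*ring_doorbell)(uint32_t req))
{
	q->req += nsegs;                        /* descriptors were filled earlier */

	if (queue_stopped || !more_pending)     /* mirrors the netdev_xmit_more() check */
		ring_doorbell(q->req);          /* one write covers the whole batch */
}
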
779 static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
784 u32 reqi = tx->req;
786 pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
789 info = &tx->info[reqi & tx->mask];
794 nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
802 gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
807 gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
812 memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
815 tx->tx_fifo.qpl->page_buses,
830 struct gve_tx_ring *tx;
842 tx = &priv->tx[qid];
844 spin_lock(&tx->xdp_lock);
846 err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
853 gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
855 spin_unlock(&tx->xdp_lock);
857 u64_stats_update_begin(&tx->statss);
858 tx->xdp_xmit += n;
859 tx->xdp_xmit_errors += n - i;
860 u64_stats_update_end(&tx->statss);
865 int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
870 if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
873 nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
874 tx->req += nsegs;
881 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
892 idx = tx->done & tx->mask;
895 tx->q_num, __func__, idx, tx->req, tx->done);
896 info = &tx->info[idx];
900 if (tx->raw_addressing)
901 gve_tx_unmap_buf(tx->dev, info);
902 tx->done++;
909 if (tx->raw_addressing)
915 if (!tx->raw_addressing)
916 gve_tx_free_fifo(&tx->tx_fifo, space_freed);
917 u64_stats_update_begin(&tx->statss);
918 tx->bytes_done += bytes;
919 tx->pkt_done += pkts;
920 u64_stats_update_end(&tx->statss);
921 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);
928 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
929 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
930 tx->wake_queue++;
931 netif_tx_wake_queue(tx->netdev_txq);
938 struct gve_tx_ring *tx)
940 u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
946 static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
953 spin_lock(&tx->xdp_lock);
955 if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
956 !xsk_tx_peek_desc(tx->xsk_pool, &desc))
959 data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
960 nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
961 tx->req += nsegs;
966 gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
967 xsk_tx_release(tx->xsk_pool);
969 spin_unlock(&tx->xdp_lock);
977 struct gve_tx_ring *tx;
980 tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
981 if (tx->xsk_pool) {
982 sent = gve_xsk_tx(priv, tx, budget);
984 u64_stats_update_begin(&tx->statss);
985 tx->xdp_xsk_sent += sent;
986 u64_stats_update_end(&tx->statss);
987 if (xsk_uses_need_wakeup(tx->xsk_pool))
988 xsk_set_tx_need_wakeup(tx->xsk_pool);
997 struct gve_tx_ring *tx = block->tx;
1002 nic_done = gve_tx_load_event_counter(priv, tx);
1003 to_do = min_t(u32, (nic_done - tx->done), budget);
1004 gve_clean_xdp_done(priv, tx, to_do);
1007 return nic_done != tx->done;
1013 struct gve_tx_ring *tx = block->tx;
1025 spin_lock(&tx->clean_lock);
1027 nic_done = gve_tx_load_event_counter(priv, tx);
1028 to_do = min_t(u32, (nic_done - tx->done), budget);
1029 gve_clean_tx_done(priv, tx, to_do, true);
1030 spin_unlock(&tx->clean_lock);
1032 return nic_done != tx->done;
1035 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
1037 u32 nic_done = gve_tx_load_event_counter(priv, tx);
1039 return nic_done != tx->done;
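
The poll helpers at the bottom of the listing (gve_xdp_poll, gve_tx_poll, gve_tx_clean_pending) are driven by a device-updated event counter: nic_done - tx->done is the outstanding completion work, at most `budget` of it is reclaimed per poll, and `nic_done != tx->done` signals that another poll round is needed. A compact stand-alone model of that contract (per-descriptor cleanup elided):

#include <stdbool.h>
#include <stdint.h>

struct cq_model {
	uint32_t done;      /* completions the driver has already reclaimed */
	uint32_t nic_done;  /* stand-in for the event counter the NIC updates */
};

/* Reclaim at most `budget` completions; return true if work remains,
 * matching the "nic_done != tx->done" re-poll condition above. */
static bool poll_completions(struct cq_model *q, uint32_t budget)
{
	uint32_t to_do = q->nic_done - q->done;

	if (to_do > budget)
		to_do = budget;         /* min_t(u32, nic_done - done, budget) */

	q->done += to_do;
	return q->nic_done != q->done;
}
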