Lines matching full:tx (search hits in the gve driver's Tx datapath, drivers/net/ethernet/google/gve/gve_tx.c; each hit shows the file line number, the matching line, and the enclosing function)

22  * We copy skb payloads into the registered segment before writing Tx
23 * descriptors and ringing the Tx doorbell.
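
The comment matched at lines 22-23 sits near the top of gve_tx.c and states the bounce-buffer model: payload bytes are first copied into the registered segment, and only then are Tx descriptors written and the doorbell rung. Below is a minimal user-space sketch of that ordering; everything in it (seg, desc, xmit_one, the release store standing in for the pre-doorbell barrier) is invented for illustration, not the driver's API, which does the real work in gve_tx_add_skb() and gve_tx_put_doorbell().

    /* Toy model of "copy, then descriptor, then doorbell"; all names are invented. */
    #include <stdatomic.h>
    #include <string.h>

    struct seg  { char bytes[4096]; };          /* stand-in for the registered segment */
    struct desc { unsigned int offset, len; };  /* stand-in for a Tx descriptor        */

    static struct seg  segment;
    static struct desc ring[256];
    static _Atomic unsigned int doorbell;       /* models the write-only doorbell      */

    static void xmit_one(const void *payload, unsigned int len, unsigned int slot)
    {
    	/* 1. Copy the payload into the registered segment (len <= 4096 here). */
    	memcpy(segment.bytes, payload, len);

    	/* 2. Publish a descriptor that points at the copied bytes. */
    	ring[slot] = (struct desc){ .offset = 0, .len = len };

    	/* 3. Only then tell the device how far it may read; the release store
    	 *    marks where the driver orders its doorbell write.              */
    	atomic_store_explicit(&doorbell, slot + 1, memory_order_release);
    }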
64 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
125 /* gve_tx_free_fifo - Return space to Tx FIFO
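
Lines 64 and 125 are the doc comments for gve_tx_alloc_fifo() and gve_tx_free_fifo(), the pair that carves byte ranges out of the per-ring Tx FIFO for copied packet data and hands the space back after completion. Below is a hedged, self-contained model of such a wrap-around byte allocator; the struct and function names are made up, and the real FIFO is backed by the registered queue page list (the qpl seen elsewhere in these matches) rather than a flat buffer.

    #include <stddef.h>

    struct byte_fifo {
    	size_t size;       /* total bytes in the FIFO                 */
    	size_t head;       /* next offset to hand out, wraps at size  */
    	size_t available;  /* bytes not owned by in-flight packets    */
    };

    struct frag { size_t offset, len; };

    /* Carve @bytes out of the FIFO; returns the number of fragments used
     * (two when the range wraps past the end), or 0 if there is no room. */
    static int fifo_alloc(struct byte_fifo *f, size_t bytes, struct frag out[2])
    {
    	size_t want = bytes;
    	int nfrags = 0;

    	if (want > f->available)
    		return 0;                         /* caller must stop the queue */

    	if (f->head + bytes > f->size) {          /* wraps: split into two     */
    		out[nfrags++] = (struct frag){ f->head, f->size - f->head };
    		bytes -= f->size - f->head;
    		f->head = 0;
    	}
    	out[nfrags++] = (struct frag){ f->head, bytes };
    	f->head += bytes;
    	f->available -= want;
    	return nfrags;
    }

    /* Give the space back once the device has consumed those bytes. */
    static void fifo_free(struct byte_fifo *f, size_t bytes)
    {
    	f->available += bytes;
    }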
139 block->tx = NULL; in gve_tx_remove_from_block()
142 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
147 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_free_ring() local
153 slots = tx->mask + 1; in gve_tx_free_ring()
154 gve_clean_tx_done(priv, tx, tx->req, false); in gve_tx_free_ring()
155 netdev_tx_reset_queue(tx->netdev_txq); in gve_tx_free_ring()
157 dma_free_coherent(hdev, sizeof(*tx->q_resources), in gve_tx_free_ring()
158 tx->q_resources, tx->q_resources_bus); in gve_tx_free_ring()
159 tx->q_resources = NULL; in gve_tx_free_ring()
161 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_free_ring()
162 gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); in gve_tx_free_ring()
163 tx->tx_fifo.qpl = NULL; in gve_tx_free_ring()
165 bytes = sizeof(*tx->desc) * slots; in gve_tx_free_ring()
166 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_free_ring()
167 tx->desc = NULL; in gve_tx_free_ring()
169 vfree(tx->info); in gve_tx_free_ring()
170 tx->info = NULL; in gve_tx_free_ring()
172 netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); in gve_tx_free_ring()
179 struct gve_tx_ring *tx = &priv->tx[queue_idx]; in gve_tx_add_to_block() local
181 block->tx = tx; in gve_tx_add_to_block()
182 tx->ntfy_id = ntfy_idx; in gve_tx_add_to_block()
187 struct gve_tx_ring *tx = &priv->tx[idx]; in gve_tx_alloc_ring() local
193 memset(tx, 0, sizeof(*tx)); in gve_tx_alloc_ring()
194 tx->q_num = idx; in gve_tx_alloc_ring()
196 tx->mask = slots - 1; in gve_tx_alloc_ring()
199 tx->info = vzalloc(sizeof(*tx->info) * slots); in gve_tx_alloc_ring()
200 if (!tx->info) in gve_tx_alloc_ring()
203 /* alloc tx queue */ in gve_tx_alloc_ring()
204 bytes = sizeof(*tx->desc) * slots; in gve_tx_alloc_ring()
205 tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); in gve_tx_alloc_ring()
206 if (!tx->desc) in gve_tx_alloc_ring()
209 tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); in gve_tx_alloc_ring()
211 /* map Tx FIFO */ in gve_tx_alloc_ring()
212 if (gve_tx_fifo_init(priv, &tx->tx_fifo)) in gve_tx_alloc_ring()
215 tx->q_resources = in gve_tx_alloc_ring()
217 sizeof(*tx->q_resources), in gve_tx_alloc_ring()
218 &tx->q_resources_bus, in gve_tx_alloc_ring()
220 if (!tx->q_resources) in gve_tx_alloc_ring()
223 netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx, in gve_tx_alloc_ring()
224 (unsigned long)tx->bus); in gve_tx_alloc_ring()
225 tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx); in gve_tx_alloc_ring()
231 gve_tx_fifo_release(priv, &tx->tx_fifo); in gve_tx_alloc_ring()
233 dma_free_coherent(hdev, bytes, tx->desc, tx->bus); in gve_tx_alloc_ring()
234 tx->desc = NULL; in gve_tx_alloc_ring()
236 vfree(tx->info); in gve_tx_alloc_ring()
237 tx->info = NULL; in gve_tx_alloc_ring()
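
The gve_tx_alloc_ring() matches show the allocations in order (the info array, the descriptor ring, the QPL-backed FIFO, then q_resources) and, at lines 231-237, the matching teardown when a later step fails. A generic sketch of that goto-unwind idiom follows, with placeholder heap allocations instead of the driver's DMA and QPL helpers:

    #include <stdlib.h>

    /* Resources are released in reverse order of acquisition; each error
     * label frees only what was successfully set up before the failure.  */
    static int ring_alloc_sketch(void)
    {
    	void *info, *desc, *q_res;

    	info = calloc(256, 16);
    	if (!info)
    		goto err_info;

    	desc = calloc(256, 16);
    	if (!desc)
    		goto err_desc;

    	q_res = calloc(1, 64);
    	if (!q_res)
    		goto err_q_res;

    	return 0;

    err_q_res:
    	free(desc);
    err_desc:
    	free(info);
    err_info:
    	return -1;
    }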
250 "Failed to alloc tx ring=%d: err=%d\n", in gve_tx_alloc_rings()
274 * @tx: tx ring to check
280 static inline u32 gve_tx_avail(struct gve_tx_ring *tx) in gve_tx_avail() argument
282 return tx->mask + 1 - (tx->req - tx->done); in gve_tx_avail()
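
gve_tx_avail() (line 282) computes free slots from two free-running u32 counters: tx->req - tx->done is the number of in-flight slots and tx->mask + 1 is the ring size, and unsigned wraparound keeps the subtraction correct even after the counters overflow. A small stand-alone demonstration of that arithmetic (the values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t mask = 1024 - 1;              /* ring of 1024 slots            */
    	uint32_t req  = 0xfffffff0u;           /* producer counter, about to wrap */
    	uint32_t done = 0xffffffa0u;           /* consumer counter              */

    	uint32_t in_flight = req - done;       /* 80 slots in use               */
    	uint32_t avail     = mask + 1 - in_flight;

    	printf("in flight: %u, available: %u\n", in_flight, avail);

    	req += 200;                            /* producer wraps past 2^32      */
    	printf("after wrap, in flight: %u\n", req - done);   /* still 280       */
    	return 0;
    }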
285 static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, in gve_skb_fifo_bytes_required() argument
295 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, in gve_skb_fifo_bytes_required()
313 static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) in gve_can_tx() argument
315 return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && in gve_can_tx()
316 gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required)); in gve_can_tx()
320 static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) in gve_maybe_stop_tx() argument
324 bytes_required = gve_skb_fifo_bytes_required(tx, skb); in gve_maybe_stop_tx()
325 if (likely(gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
329 tx->stop_queue++; in gve_maybe_stop_tx()
330 netif_tx_stop_queue(tx->netdev_txq); in gve_maybe_stop_tx()
340 * if (tx queue stopped) in gve_maybe_stop_tx()
345 if (likely(!gve_can_tx(tx, bytes_required))) in gve_maybe_stop_tx()
348 netif_tx_start_queue(tx->netdev_txq); in gve_maybe_stop_tx()
349 tx->wake_queue++; in gve_maybe_stop_tx()
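
gve_maybe_stop_tx() (lines 320-349) checks descriptor and FIFO space via gve_can_tx(), stops the queue when either is short, and then re-checks before giving up; the fragment at line 340 is part of the comment diagramming the race with gve_clean_tx_done() that the re-check closes. Below is a condensed model of that stop/re-check/wake pattern; the atomics and names are stand-ins for the driver's queue state, counters, and memory barrier, which are not all visible in these matches.

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool queue_stopped;
    static _Atomic int  free_slots;

    static bool have_room(void)  { return atomic_load(&free_slots) > 0; }
    static void queue_stop(void) { atomic_store(&queue_stopped, true);  }
    static void queue_wake(void) { atomic_store(&queue_stopped, false); }

    /* Transmit path: returns true if the caller may post the packet. */
    static bool maybe_stop(void)
    {
    	if (have_room())
    		return true;

    	queue_stop();
    	/* Pairs with the fence in on_completion(); marks where the driver
    	 * would place its memory barrier.                                */
    	atomic_thread_fence(memory_order_seq_cst);

    	if (!have_room())
    		return false;          /* genuinely out of space: stay stopped */

    	queue_wake();                  /* a completion freed space meanwhile   */
    	return true;
    }

    /* Completion path: free space, then wake a stopped queue if possible. */
    static void on_completion(int freed)
    {
    	atomic_fetch_add(&free_slots, freed);
    	atomic_thread_fence(memory_order_seq_cst);
    	if (atomic_load(&queue_stopped) && have_room())
    		queue_wake();
    }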
407 static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, in gve_tx_add_skb() argument
414 u32 idx = tx->req & tx->mask; in gve_tx_add_skb()
420 info = &tx->info[idx]; in gve_tx_add_skb()
421 pkt_desc = &tx->desc[idx]; in gve_tx_add_skb()
436 pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen); in gve_tx_add_skb()
437 hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes, in gve_tx_add_skb()
440 payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen, in gve_tx_add_skb()
448 tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, in gve_tx_add_skb()
450 gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb()
456 next_idx = (tx->req + 1 + i - payload_iov) & tx->mask; in gve_tx_add_skb()
457 seg_desc = &tx->desc[next_idx]; in gve_tx_add_skb()
464 tx->tx_fifo.base + info->iov[i].iov_offset, in gve_tx_add_skb()
466 gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, in gve_tx_add_skb()
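
The gve_tx_add_skb() matches show the header being padded and carved from the FIFO, the payload carved separately, both copied to tx->tx_fifo.base plus the fragment offsets and DMA-synced, and one segment descriptor written per payload fragment at (tx->req + 1 + i - payload_iov) & tx->mask. The demo below shows just that slot arithmetic, with the loop index simplified to start at 0 and the values made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t mask = 256 - 1;
    	uint32_t req  = 510;          /* producer counter near the index wrap */
    	int payload_nfrags = 2;       /* e.g. payload split across a FIFO wrap */

    	/* Packet descriptor goes at (req & mask)... */
    	printf("pkt desc slot: %u\n", req & mask);                    /* 254    */

    	/* ...and each payload fragment gets the next slot in order.  */
    	for (int i = 0; i < payload_nfrags; i++)
    		printf("seg desc %d slot: %u\n", i, (req + 1 + i) & mask); /* 255, 0 */
    	return 0;
    }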
478 struct gve_tx_ring *tx; in gve_tx() local
483 tx = &priv->tx[skb_get_queue_mapping(skb)]; in gve_tx()
484 if (unlikely(gve_maybe_stop_tx(tx, skb))) { in gve_tx()
485 /* We need to ring the txq doorbell -- we have stopped the Tx in gve_tx()
490 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
493 nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev); in gve_tx()
495 netdev_tx_sent_queue(tx->netdev_txq, skb->len); in gve_tx()
499 tx->req += nsegs; in gve_tx()
501 if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) in gve_tx()
504 gve_tx_put_doorbell(priv, tx->q_resources, tx->req); in gve_tx()
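
In gve_tx() (lines 478-504), a queue that must stop still gets a doorbell for the work already posted, and otherwise the doorbell write is skipped while netdev_xmit_more() reports that more packets are queued right behind this one. A sketch of that batching decision; req, ring_doorbell() and the boolean parameters stand in for tx->req, gve_tx_put_doorbell() and the netif_xmit_stopped()/netdev_xmit_more() checks.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t req;                        /* free-running posted count     */

    static void ring_doorbell(uint32_t val)     /* stand-in for the MMIO write   */
    {
    	printf("doorbell <- %u\n", val);
    }

    static void xmit_sketch(int nsegs, bool queue_stopped, bool xmit_more)
    {
    	req += nsegs;                       /* descriptors were already filled */

    	/* Defer the doorbell only when the queue is still running and the
    	 * stack has promised another packet immediately after this one.   */
    	if (!queue_stopped && xmit_more)
    		return;

    	ring_doorbell(req);
    }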
510 static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, in gve_clean_tx_done() argument
521 idx = tx->done & tx->mask; in gve_clean_tx_done()
524 tx->q_num, __func__, idx, tx->req, tx->done); in gve_clean_tx_done()
525 info = &tx->info[idx]; in gve_clean_tx_done()
542 tx->done++; in gve_clean_tx_done()
545 gve_tx_free_fifo(&tx->tx_fifo, space_freed); in gve_clean_tx_done()
546 u64_stats_update_begin(&tx->statss); in gve_clean_tx_done()
547 tx->bytes_done += bytes; in gve_clean_tx_done()
548 tx->pkt_done += pkts; in gve_clean_tx_done()
549 u64_stats_update_end(&tx->statss); in gve_clean_tx_done()
550 netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes); in gve_clean_tx_done()
557 if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) && in gve_clean_tx_done()
558 likely(gve_can_tx(tx, GVE_TX_START_THRESH))) { in gve_clean_tx_done()
559 tx->wake_queue++; in gve_clean_tx_done()
560 netif_tx_wake_queue(tx->netdev_txq); in gve_clean_tx_done()
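
gve_clean_tx_done() (lines 510-560) walks completed slots starting at tx->done, sums bytes and packets, returns the copied bytes to the FIFO, publishes the totals under u64_stats, reports them to BQL with netdev_tx_completed_queue(), and wakes the queue if it was stopped and space is back. A user-space model of that bookkeeping loop, with the per-slot layout invented and the stats/BQL/wake steps reduced to plain counters and comments:

    #include <stdint.h>
    #include <stddef.h>

    struct slot_info { size_t skb_bytes; size_t fifo_bytes; };

    struct ring_model {
    	uint32_t mask, done;            /* mask must be sizeof(info)/... - 1 */
    	struct slot_info info[256];
    	uint64_t bytes_done, pkt_done;
    	size_t fifo_available;
    };

    static void clean_done(struct ring_model *r, uint32_t to_do)
    {
    	size_t bytes = 0, space_freed = 0;
    	uint64_t pkts = 0;

    	for (uint32_t j = 0; j < to_do; j++) {
    		struct slot_info *info = &r->info[r->done & r->mask];

    		if (info->skb_bytes) {          /* this slot held a packet head */
    			bytes += info->skb_bytes;
    			pkts++;
    		}
    		space_freed += info->fifo_bytes;
    		*info = (struct slot_info){ 0 };
    		r->done++;
    	}

    	r->fifo_available += space_freed;       /* gve_tx_free_fifo()          */
    	r->bytes_done += bytes;                 /* u64_stats update section    */
    	r->pkt_done += pkts;
    	/* netdev_tx_completed_queue(txq, pkts, bytes) and the conditional
    	 * queue wake would follow here in the driver.                      */
    }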
567 struct gve_tx_ring *tx) in gve_tx_load_event_counter() argument
569 u32 counter_index = be32_to_cpu((tx->q_resources->counter_index)); in gve_tx_load_event_counter()
577 struct gve_tx_ring *tx = block->tx; in gve_tx_poll() local
587 tx->last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_poll()
588 nic_done = be32_to_cpu(tx->last_nic_done); in gve_tx_poll()
593 to_do = min_t(u32, (nic_done - tx->done), budget); in gve_tx_poll()
594 gve_clean_tx_done(priv, tx, to_do, true); in gve_tx_poll()
597 repoll |= (nic_done != tx->done); in gve_tx_poll()
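
gve_tx_poll() (lines 577-597) reads the NIC-written completion counter selected by q_resources->counter_index, converts it from big-endian, cleans at most `budget` of the nic_done - tx->done finished slots, and re-polls if it is still behind. The demo below walks through the byte-order conversion and the min-with-budget arithmetic using made-up values; a portable helper replaces the kernel's be32_to_cpu().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Interpret the in-memory bytes of a u32 as big-endian. */
    static uint32_t be32_to_host(uint32_t be)
    {
    	const uint8_t *b = (const uint8_t *)&be;
    	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
    	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    int main(void)
    {
    	uint32_t counter_be;
    	uint8_t raw[4] = { 0, 0, 1, 44 };      /* big-endian encoding of 300   */
    	memcpy(&counter_be, raw, sizeof(counter_be));

    	uint32_t nic_done = be32_to_host(counter_be);        /* 300            */
    	uint32_t done = 120, budget = 64;

    	uint32_t outstanding = nic_done - done;               /* 180            */
    	uint32_t to_do = outstanding < budget ? outstanding : budget;  /* 64    */
    	int repoll = (nic_done != done + to_do);              /* still behind   */

    	printf("nic_done=%u to_do=%u repoll=%d\n", nic_done, to_do, repoll);
    	return 0;
    }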