Lines matching full:tx
108 * The tx queue len can be adjusted upward while the interface is in hfi1_ipoib_check_queue_stopped()
110 * The tx queue len can be large enough to overflow the txreq_ring. in hfi1_ipoib_check_queue_stopped()
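The two comment fragments above (from hfi1_ipoib_check_queue_stopped()) note that the tx queue length can be raised at runtime while the completion ring keeps its initialization size, so the ring can overflow unless the queue is stopped first. A minimal userspace sketch of that sizing/overflow check, with illustrative names rather than the driver's own structures:

#include <stdbool.h>
#include <stddef.h>

struct tx_ring {
	size_t head;   /* producer index: completions are written here */
	size_t tail;   /* consumer index: completions are drained here */
	size_t size;   /* power of two, so index math can use a mask   */
};

/* Round a requested tx queue length up to the next power of two. */
static size_t ring_size_for(size_t tx_queue_len)
{
	size_t n = 1;

	while (n < tx_queue_len)
		n <<= 1;
	return n;
}

/* True when storing one more completion would wrap the producer onto the
 * consumer, i.e. the ring is about to overflow. */
static bool ring_would_overflow(const struct tx_ring *r)
{
	return ((r->head + 1) & (r->size - 1)) == (r->tail & (r->size - 1));
}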
119 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) in hfi1_ipoib_free_tx() argument
121 struct hfi1_ipoib_dev_priv *priv = tx->priv; in hfi1_ipoib_free_tx()
123 if (likely(!tx->sdma_status)) { in hfi1_ipoib_free_tx()
124 hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len); in hfi1_ipoib_free_tx()
129 __func__, tx->sdma_status, in hfi1_ipoib_free_tx()
130 le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, in hfi1_ipoib_free_tx()
131 tx->txq->sde->this_idx); in hfi1_ipoib_free_tx()
134 napi_consume_skb(tx->skb, budget); in hfi1_ipoib_free_tx()
135 sdma_txclean(priv->dd, &tx->txreq); in hfi1_ipoib_free_tx()
136 kmem_cache_free(priv->txreq_cache, tx); in hfi1_ipoib_free_tx()
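hfi1_ipoib_free_tx() above shows the completion/free pattern: bump the netdev counters on success, log the SDMA status on failure, then release the skb and return the request to its slab cache. A hedged userspace sketch of the same shape, where the struct, counters, and malloc/free are stand-ins for the kernel objects and allocators:

#include <stdio.h>
#include <stdlib.h>

struct txreq_sketch {
	int dma_status;    /* 0 on success, engine status code otherwise */
	size_t pkt_len;    /* bytes that were handed to the hardware     */
	void *pkt;         /* packet buffer                              */
};

static unsigned long tx_packets, tx_bytes;

static void free_tx_sketch(struct txreq_sketch *tx)
{
	if (tx->dma_status == 0) {
		/* Success: account one packet and its length. */
		tx_packets++;
		tx_bytes += tx->pkt_len;
	} else {
		/* Failure: report the engine status before cleaning up. */
		fprintf(stderr, "tx failed, dma status %d\n", tx->dma_status);
	}

	free(tx->pkt);   /* stands in for napi_consume_skb() */
	free(tx);        /* stands in for kmem_cache_free()  */
}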
164 /* Finished freeing tx items so store the tail value. */ in hfi1_ipoib_drain_tx_ring()
187 static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx) in hfi1_ipoib_add_tx() argument
189 struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring; in hfi1_ipoib_add_tx()
201 tx_ring->items[head] = tx; in hfi1_ipoib_add_tx()
205 napi_schedule(tx->txq->napi); in hfi1_ipoib_add_tx()
207 struct hfi1_ipoib_txq *txq = tx->txq; in hfi1_ipoib_add_tx()
208 struct hfi1_ipoib_dev_priv *priv = tx->priv; in hfi1_ipoib_add_tx()
211 hfi1_ipoib_free_tx(tx, 0); in hfi1_ipoib_add_tx()
221 struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq); in hfi1_ipoib_sdma_complete() local
223 tx->sdma_status = status; in hfi1_ipoib_sdma_complete()
225 hfi1_ipoib_add_tx(tx); in hfi1_ipoib_sdma_complete()
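hfi1_ipoib_sdma_complete() records the engine status in the request and hfi1_ipoib_add_tx() publishes it at the head of a per-queue circular buffer, then schedules NAPI to drain it; a full ring falls back to freeing the request inline. A small single-producer sketch of that hand-off (ring layout and helper names are illustrative only):

#include <stddef.h>

#define RING_SIZE 64   /* power of two, like the driver's txreq ring */

struct completion_ring {
	void *items[RING_SIZE];
	size_t head;   /* producer index, advanced by the DMA callback */
	size_t tail;   /* consumer index, advanced by the poller       */
};

static void schedule_poller(void) { /* stands in for napi_schedule() */ }
static void free_tx_inline(void *tx) { (void)tx; /* direct-free fallback */ }

static void complete_tx(struct completion_ring *ring, void *tx, int status)
{
	size_t next = (ring->head + 1) & (RING_SIZE - 1);

	(void)status;  /* the real callback stores this in the request first */

	if (next == (ring->tail & (RING_SIZE - 1))) {
		/* Ring full: free the request immediately instead of losing it. */
		free_tx_inline(tx);
		return;
	}

	ring->items[ring->head & (RING_SIZE - 1)] = tx;
	ring->head = next;
	schedule_poller();   /* let the poller drain completions later */
}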
228 static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, in hfi1_ipoib_build_ulp_payload() argument
232 struct sdma_txreq *txreq = &tx->txreq; in hfi1_ipoib_build_ulp_payload()
233 struct sk_buff *skb = tx->skb; in hfi1_ipoib_build_ulp_payload()
258 static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, in hfi1_ipoib_build_tx_desc() argument
262 struct sdma_txreq *txreq = &tx->txreq; in hfi1_ipoib_build_tx_desc()
263 struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; in hfi1_ipoib_build_tx_desc()
265 sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; in hfi1_ipoib_build_tx_desc()
281 return hfi1_ipoib_build_ulp_payload(tx, txp); in hfi1_ipoib_build_tx_desc()
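The length handed to the descriptor in hfi1_ipoib_build_tx_desc() is the PBC word plus the IB headers (kept in 32-bit dwords, hence the << 2 conversion to bytes) plus the skb payload. A standalone version of that arithmetic with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pbc = 0;            /* 8-byte packet build control word          */
	uint16_t hdr_dwords = 18;    /* example IB header length in 32-bit dwords */
	uint32_t payload_len = 1024; /* stands in for tx->skb->len                */

	/* dwords -> bytes is a shift by two, as in the driver's expression. */
	uint32_t pkt_bytes = sizeof(pbc) + (hdr_dwords << 2) + payload_len;

	printf("descriptor length: %u bytes\n", pkt_bytes); /* 8 + 72 + 1024 = 1104 */
	return 0;
}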
284 static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, in hfi1_ipoib_build_ib_tx_headers() argument
287 struct hfi1_ipoib_dev_priv *priv = tx->priv; in hfi1_ipoib_build_ib_tx_headers()
288 struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; in hfi1_ipoib_build_ib_tx_headers()
289 struct sk_buff *skb = tx->skb; in hfi1_ipoib_build_ib_tx_headers()
381 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_common() local
384 tx = kmem_cache_alloc_node(priv->txreq_cache, in hfi1_ipoib_send_dma_common()
387 if (unlikely(!tx)) in hfi1_ipoib_send_dma_common()
391 tx->txreq.num_desc = 0; in hfi1_ipoib_send_dma_common()
392 tx->priv = priv; in hfi1_ipoib_send_dma_common()
393 tx->txq = txp->txq; in hfi1_ipoib_send_dma_common()
394 tx->skb = skb; in hfi1_ipoib_send_dma_common()
395 INIT_LIST_HEAD(&tx->txreq.list); in hfi1_ipoib_send_dma_common()
397 hfi1_ipoib_build_ib_tx_headers(tx, txp); in hfi1_ipoib_send_dma_common()
399 ret = hfi1_ipoib_build_tx_desc(tx, txp); in hfi1_ipoib_send_dma_common()
410 return tx; in hfi1_ipoib_send_dma_common()
413 sdma_txclean(priv->dd, &tx->txreq); in hfi1_ipoib_send_dma_common()
414 kmem_cache_free(priv->txreq_cache, tx); in hfi1_ipoib_send_dma_common()
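hfi1_ipoib_send_dma_common() follows an allocate/build/unwind pattern: take a request from the per-device cache on the local node, fill in the headers, build the descriptor, and on failure undo the partial work before returning. A rough sketch of that shape, with malloc/free standing in for kmem_cache_alloc_node()/kmem_cache_free() and the build helpers reduced to stubs:

#include <errno.h>
#include <stdlib.h>

struct tx_sketch {
	void *skb;      /* packet being sent           */
	int hdr_built;  /* set once headers are filled */
};

static void build_headers(struct tx_sketch *tx)
{
	tx->hdr_built = 1;   /* stands in for hfi1_ipoib_build_ib_tx_headers() */
}

static int build_descriptor(struct tx_sketch *tx)
{
	return tx->skb ? 0 : -ENOMEM;  /* stands in for hfi1_ipoib_build_tx_desc() */
}

/* Allocate, initialize, build; on failure unwind before returning NULL. */
static struct tx_sketch *prepare_tx(void *skb)
{
	struct tx_sketch *tx = malloc(sizeof(*tx));

	if (!tx)
		return NULL;

	tx->skb = skb;
	tx->hdr_built = 0;

	build_headers(tx);
	if (build_descriptor(tx) != 0) {
		/* Undo the allocation (sdma_txclean + kmem_cache_free in the driver). */
		free(tx);
		return NULL;
	}
	return tx;
}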
432 dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret); in hfi1_ipoib_submit_tx_list()
455 struct ipoib_txreq *tx) in hfi1_ipoib_submit_tx() argument
461 &tx->txreq, in hfi1_ipoib_submit_tx()
477 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_single() local
480 tx = hfi1_ipoib_send_dma_common(dev, skb, txp); in hfi1_ipoib_send_dma_single()
481 if (IS_ERR(tx)) { in hfi1_ipoib_send_dma_single()
482 int ret = PTR_ERR(tx); in hfi1_ipoib_send_dma_single()
494 ret = hfi1_ipoib_submit_tx(txq, tx); in hfi1_ipoib_send_dma_single()
497 trace_sdma_output_ibhdr(tx->priv->dd, in hfi1_ipoib_send_dma_single()
498 &tx->sdma_hdr.hdr, in hfi1_ipoib_send_dma_single()
509 sdma_txclean(priv->dd, &tx->txreq); in hfi1_ipoib_send_dma_single()
511 kmem_cache_free(priv->txreq_cache, tx); in hfi1_ipoib_send_dma_single()
522 struct ipoib_txreq *tx; in hfi1_ipoib_send_dma_list() local
536 tx = hfi1_ipoib_send_dma_common(dev, skb, txp); in hfi1_ipoib_send_dma_list()
537 if (IS_ERR(tx)) { in hfi1_ipoib_send_dma_list()
538 int ret = PTR_ERR(tx); in hfi1_ipoib_send_dma_list()
550 list_add_tail(&tx->txreq.list, &txq->tx_list); in hfi1_ipoib_send_dma_list()
554 trace_sdma_output_ibhdr(tx->priv->dd, in hfi1_ipoib_send_dma_list()
555 &tx->sdma_hdr.hdr, in hfi1_ipoib_send_dma_list()
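In hfi1_ipoib_send_dma_list() each prepared request is appended to the queue's tx_list instead of being submitted on its own; the accumulated list is later handed to the engine in one call. A userspace sketch of that accumulate-then-flush pattern (the batch container and its limit are assumptions, not the driver's):

#include <stdbool.h>
#include <stddef.h>

#define BATCH_MAX 32   /* illustrative batch limit */

struct tx_batch {
	void *reqs[BATCH_MAX];
	size_t count;
};

/* One engine submission for the whole accumulated list, then reset. */
static void submit_batch(struct tx_batch *b)
{
	/* ... hand reqs[0..count) to the DMA engine in a single call ... */
	b->count = 0;
}

/* Queue a prepared request; flush when the burst ends or the batch is full. */
static void queue_tx(struct tx_batch *b, void *req, bool last_in_burst)
{
	if (b->count < BATCH_MAX)
		b->reqs[b->count++] = req;

	if (last_in_burst || b->count == BATCH_MAX)
		submit_batch(b);
}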
613 * sdma descriptors available to send the packet. It adds the Tx queue's wait
654 * This function gets called when SDMA descriptors become available and Tx
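These two kernel-doc fragments describe the back-pressure handshake: when sdma_send_txreq() finds no descriptors, the Tx queue's wait structure joins the sdma engine's dmawait list, and the wakeup hook restarts the queue once descriptors return. A bare stop/wake sketch of that handshake (fields and names are illustrative):

#include <stdbool.h>

struct txq_sketch {
	bool stopped;            /* queue parked, waiting for descriptors */
	int  descriptors_free;   /* descriptor credits left on the engine */
};

/* Send path: the engine has no descriptors, so park the queue.  In the
 * driver this is where the wait struct joins the engine's dmawait list. */
static void txq_sleep(struct txq_sketch *txq)
{
	txq->stopped = true;
}

/* Engine side: completions returned descriptors, wake any parked queue. */
static void txq_wakeup(struct txq_sketch *txq, int freed)
{
	txq->descriptors_free += freed;
	if (txq->stopped && txq->descriptors_free > 0) {
		txq->stopped = false;
		/* ... resubmit any deferred packets here ... */
	}
}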
790 struct ipoib_txreq *tx = in hfi1_ipoib_drain_tx_list() local
794 sdma_txclean(txq->priv->dd, &tx->txreq); in hfi1_ipoib_drain_tx_list()
795 dev_kfree_skb_any(tx->skb); in hfi1_ipoib_drain_tx_list()
796 kmem_cache_free(txq->priv->txreq_cache, tx); in hfi1_ipoib_drain_tx_list()
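hfi1_ipoib_drain_tx_list() is the teardown counterpart: any request still queued on tx_list has its descriptor cleaned, its skb dropped, and the request returned to the cache. In this sketch a plain singly linked list stands in for the kernel list_head, and free() replaces the kernel release calls:

#include <stdlib.h>

struct pending_tx {
	struct pending_tx *next;
	void *pkt;
};

/* Walk the list of never-submitted requests, freeing each one. */
static void drain_tx_list_sketch(struct pending_tx **list)
{
	struct pending_tx *tx = *list;

	while (tx) {
		struct pending_tx *next = tx->next;  /* save before freeing */

		free(tx->pkt);   /* dev_kfree_skb_any() stand-in */
		free(tx);        /* kmem_cache_free() stand-in   */
		tx = next;
	}
	*list = NULL;
}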