Lines matching defs:tx (definitions and uses of the struct vnic_txreq pointer tx in the hfi1 VNIC SDMA transmit path)
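
Every match below touches a single object: a struct vnic_txreq, the per-packet transmit request that embeds an SDMA txreq. As orientation for the listing, here is a minimal reconstruction of that struct, inferred only from the fields the matched lines dereference (txreq, sdma, skb, pad, plen, pbc_val); the exact field types and ordering are assumptions, not taken from the driver source.

    /* Hedged reconstruction from the matched lines only; field types
     * and ordering are assumptions.
     */
    struct vnic_txreq {
        struct sdma_txreq txreq;       /* embedded, not a pointer: see container_of() at line 41 */
        struct hfi1_vnic_sdma *sdma;   /* owning queue, read at line 42 */
        struct sk_buff *skb;           /* packet being sent, freed at line 45 */
        unsigned char pad[HFI1_VNIC_MAX_PAD]; /* tail padding, consumed at lines 77-79 */
        u16 plen;                      /* pad length in bytes */
        __le64 pbc_val;                /* little-endian PBC, built at line 105 */
    };
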
41 struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
42 struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;
45 dev_kfree_skb_any(tx->skb);
46 kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
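
Lines 41-46 are the teardown path run when the hardware completes a send. The sketch below fills in a plausible surrounding callback; the function name vnic_sdma_complete, its sdma_txreq callback signature, and the sdma_txclean() step are assumptions, while the three tx-handling statements come straight from the matches.

    /* Hedged sketch: the callback name and the sdma_txclean() call are
     * assumptions; the tx-handling lines are the matches at 41-46.
     */
    static void vnic_sdma_complete(struct sdma_txreq *txreq, int status)
    {
        /* recover the containing request from the embedded txreq */
        struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
        struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

        sdma_txclean(vnic_sdma->dd, txreq);   /* assumption: unmap descriptors */
        dev_kfree_skb_any(tx->skb);           /* _any variant: may run in IRQ context */
        kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
    }

The container_of() at line 41 works only because txreq is embedded in struct vnic_txreq rather than pointed to, which is why the struct sketch above places it as a direct member.
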
50 struct vnic_txreq *tx)
56 &tx->txreq,
57 tx->skb->data,
58 skb_headlen(tx->skb));
62 for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
63 skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
67 &tx->txreq,
76 if (tx->plen)
77 ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
78 tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
79 tx->plen);
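
Lines 50-79 belong to build_vnic_ulp_payload() (named at line 115), which maps the skb into SDMA descriptors in three steps: linear head, page fragments, then tail padding. In the sketch below the sdma_txadd_page() call for fragments and the error handling are assumptions; the head and pad sdma_txadd_kvaddr() calls are the matches nearly verbatim.

    /* Hedged sketch of build_vnic_ulp_payload(); the fragment loop
     * body and the bail_txadd label are assumptions.
     */
    static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
                                               struct vnic_txreq *tx)
    {
        int i, ret;

        /* 1. linear part of the skb */
        ret = sdma_txadd_kvaddr(sde->dd,
                                &tx->txreq,
                                tx->skb->data,
                                skb_headlen(tx->skb));
        if (unlikely(ret))
            goto bail_txadd;

        /* 2. one descriptor per page fragment */
        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
            skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

            ret = sdma_txadd_page(sde->dd,
                                  &tx->txreq,          /* line 67 */
                                  skb_frag_page(frag),
                                  skb_frag_off(frag),
                                  skb_frag_size(frag));
            if (unlikely(ret))
                goto bail_txadd;
        }

        /* 3. tail padding (lines 76-79) */
        if (tx->plen)
            ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                    tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
                                    tx->plen);

    bail_txadd:
        return ret;
    }

The pointer arithmetic at lines 78-79 is worth noting: the pad bytes are read from the end of tx->pad, so a single statically sized buffer serves any pad length up to HFI1_VNIC_MAX_PAD.
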
86 struct vnic_txreq *tx,
93 &tx->txreq,
95 hdrbytes + tx->skb->len + tx->plen,
105 tx->pbc_val = cpu_to_le64(pbc);
108 &tx->txreq,
109 &tx->pbc_val,
115 ret = build_vnic_ulp_payload(sde, tx);
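
Lines 86-115 are build_vnic_tx_desc() (named at line 151): initialize the txreq with the full wire length, prepend the PBC, then delegate to the payload builder. In this sketch the sdma_txinit_ahg() argument list beyond the matched length expression, and the value of hdrbytes (8 bytes for a 64-bit PBC), are assumptions.

    /* Hedged sketch of build_vnic_tx_desc(); the txinit flags/AHG
     * arguments and the hdrbytes value are assumptions.
     */
    static int build_vnic_tx_desc(struct sdma_engine *sde,
                                  struct vnic_txreq *tx,
                                  u64 pbc)
    {
        int ret;
        u16 hdrbytes = 8;   /* assumption: size of the 64-bit PBC */

        /* total wire length: PBC + packet + pad (line 95) */
        ret = sdma_txinit_ahg(&tx->txreq, 0,
                              hdrbytes + tx->skb->len + tx->plen,
                              0, 0, NULL, 0,
                              vnic_sdma_complete);  /* completion callback */
        if (unlikely(ret))
            goto bail_txadd;

        /* the PBC goes out little endian regardless of host byte order */
        tx->pbc_val = cpu_to_le64(pbc);             /* line 105 */
        ret = sdma_txadd_kvaddr(sde->dd,
                                &tx->txreq,
                                &tx->pbc_val,
                                hdrbytes);
        if (unlikely(ret))
            goto bail_txadd;

        ret = build_vnic_ulp_payload(sde, tx);      /* line 115 */

    bail_txadd:
        return ret;
    }

Note the design choice visible at lines 105-109: tx->pbc_val must live inside the request rather than on the stack, because the DMA engine reads it asynchronously after this function returns; that is also why the struct sketch gives it a dedicated field.
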
132 struct vnic_txreq *tx;
141 tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
142 if (unlikely(!tx)) {
147 tx->sdma = vnic_sdma;
148 tx->skb = skb;
149 hfi1_vnic_update_pad(tx->pad, plen);
150 tx->plen = plen;
151 ret = build_vnic_tx_desc(sde, tx, pbc);
156 &tx->txreq, vnic_sdma->pkts_sent);
168 sdma_txclean(dd, &tx->txreq);
169 kmem_cache_free(dd->vnic.txreq_cache, tx);
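
Lines 132-169 are the send path that allocates and queues a request. The sketch below is the loosest reconstruction of the four: the function name (called hfi1_vnic_send_dma here), its signature, the sdma_send_txreq() wait argument, and the error labels are all assumptions; the allocation, field setup, and the cleanup pair are the matches.

    /* Hedged sketch of the send path; name, signature, wait argument
     * and error flow are assumptions, grounded only in lines 132-169.
     */
    static int hfi1_vnic_send_dma(struct hfi1_devdata *dd,
                                  struct hfi1_vnic_sdma *vnic_sdma,
                                  struct sdma_engine *sde,
                                  struct sk_buff *skb, u64 pbc, u8 plen)
    {
        struct vnic_txreq *tx;
        int ret;

        /* GFP_ATOMIC: the xmit path cannot sleep (line 141) */
        tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
        if (unlikely(!tx)) {
            ret = -ENOMEM;                    /* assumption */
            goto tx_err;
        }

        tx->sdma = vnic_sdma;                 /* lines 147-150 */
        tx->skb = skb;
        hfi1_vnic_update_pad(tx->pad, plen);  /* precompute pad bytes */
        tx->plen = plen;

        ret = build_vnic_tx_desc(sde, tx, pbc);  /* line 151 */
        if (unlikely(ret))
            goto free_desc;

        ret = sdma_send_txreq(sde, &vnic_sdma->wait,  /* wait arg: assumption */
                              &tx->txreq, vnic_sdma->pkts_sent);
        if (likely(!ret))
            return 0;   /* completion callback now owns tx and the skb */

    free_desc:
        sdma_txclean(dd, &tx->txreq);              /* line 168 */
        kmem_cache_free(dd->vnic.txreq_cache, tx); /* line 169 */
    tx_err:
        dev_kfree_skb_any(skb);                    /* assumption */
        return ret;
    }

The ownership handoff is the key invariant: once sdma_send_txreq() succeeds, tx and the skb are freed only by the completion callback (lines 45-46); on failure they are unwound locally (lines 168-169), so neither path double-frees.
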