Lines Matching full:tx
231 * The tx request, once initialized, is manipulated with calls to
240 * in the tx. Memory locations added with sdma_txadd_page()
242 * to the tx and unmapped as part of the progress processing in the
246 * tx. A typical use case is a pre-allocated
253 * a tx to the ring after the appropriate number of
260 * long as the tx isn't in flight.
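Read together, these comment fragments give the txreq lifecycle: initialize the request, add each disjoint memory location, submit it, and only recycle the request once it is no longer in flight. A minimal sketch of that flow, assuming the sdma_txinit() and sdma_txadd_kvaddr() signatures matched further down (the engine submit step is elided, since sdma_send_txreq()'s exact signature varies across kernel versions):

	/*
	 * Hedged sketch, not verbatim driver code: build one packet from a
	 * single kernel buffer, assuming the two-argument completion
	 * callback described at the sdma_txinit() matches below.
	 */
	static void one_buf_complete(struct sdma_txreq *tx, int status)
	{
		/* progress processing already unmapped the buffer */
	}

	static int one_buf_build(struct hfi1_devdata *dd, void *buf, u16 len)
	{
		struct sdma_txreq tx;	/* real callers embed the tx in a
					 * longer-lived request; it must stay
					 * valid until the callback runs */
		int ret;

		ret = sdma_txinit(&tx, 0 /* flags */, len, one_buf_complete);
		if (ret)
			return ret;
		ret = sdma_txadd_kvaddr(dd, &tx, buf, len);
		if (ret)
			return ret;	/* add routines clean the tx on error */
		/* ... hand the tx to an engine (e.g. sdma_send_txreq()) ... */
		return 0;
	}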
481 struct sdma_txreq *tx,
489 * @tx: tx request to initialize
520 * being submitted. The callback, if non-NULL, will be provided this tx and a status.
540 struct sdma_txreq *tx, in sdma_txinit_ahg() argument
553 tx->desc_limit = ARRAY_SIZE(tx->descs); in sdma_txinit_ahg()
554 tx->descp = &tx->descs[0]; in sdma_txinit_ahg()
555 INIT_LIST_HEAD(&tx->list); in sdma_txinit_ahg()
556 tx->num_desc = 0; in sdma_txinit_ahg()
557 tx->flags = flags; in sdma_txinit_ahg()
558 tx->complete = cb; in sdma_txinit_ahg()
559 tx->coalesce_buf = NULL; in sdma_txinit_ahg()
560 tx->wait = NULL; in sdma_txinit_ahg()
561 tx->packet_len = tlen; in sdma_txinit_ahg()
562 tx->tlen = tx->packet_len; in sdma_txinit_ahg()
563 tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG; in sdma_txinit_ahg()
564 tx->descs[0].qw[1] = 0; in sdma_txinit_ahg()
566 tx->descs[0].qw[1] |= in sdma_txinit_ahg()
572 _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen); in sdma_txinit_ahg()
578 * @tx: tx request to initialize
602 * The callback, if non-NULL, will be provided this tx and a status. The
608 struct sdma_txreq *tx, in sdma_txinit() argument
613 return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb); in sdma_txinit()
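Per the comment matched at line 602, the completion receives only the tx and a status. A sketch of a minimal callback (SDMA_TXREQ_S_OK is from this driver's headers; treating any other status as a failure is an assumption of this sketch):

	static void my_complete(struct sdma_txreq *tx, int status)
	{
		if (status != SDMA_TXREQ_S_OK)
			pr_debug("sdma tx failed, status %d\n", status);
		/* the tx is no longer in flight and may be recycled here */
	}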
636 struct sdma_txreq *tx, in make_tx_sdma_desc() argument
641 struct sdma_desc *desc = &tx->descp[tx->num_desc]; in make_tx_sdma_desc()
643 if (!tx->num_desc) { in make_tx_sdma_desc()
659 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
665 static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx) in sdma_txclean() argument
667 if (tx->num_desc) in sdma_txclean()
668 __sdma_txclean(dd, tx); in sdma_txclean()
673 struct sdma_txreq *tx) in _sdma_close_tx() argument
675 tx->descp[tx->num_desc].qw[0] |= in _sdma_close_tx()
677 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
679 if (tx->flags & SDMA_TXREQ_F_URGENT) in _sdma_close_tx()
680 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
688 struct sdma_txreq *tx, in _sdma_txadd_daddr() argument
695 tx, in _sdma_txadd_daddr()
698 WARN_ON(len > tx->tlen); in _sdma_txadd_daddr()
699 tx->tlen -= len; in _sdma_txadd_daddr()
701 if (!tx->tlen) { in _sdma_txadd_daddr()
702 if (tx->packet_len & (sizeof(u32) - 1)) { in _sdma_txadd_daddr()
703 rval = _pad_sdma_tx_descs(dd, tx); in _sdma_txadd_daddr()
707 _sdma_close_tx(dd, tx); in _sdma_txadd_daddr()
710 tx->num_desc++; in _sdma_txadd_daddr()
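The sequence above is why tlen counts down per fragment: when the last add brings tx->tlen to zero, the packet may still need padding. The test tx->packet_len & (sizeof(u32) - 1) keeps the low two bits of the length, so packet_len = 4098 gives 4098 & 3 = 2 and _pad_sdma_tx_descs() appends a 2-byte pad descriptor (closing out the packet itself), while packet_len = 4096 gives 0 and _sdma_close_tx() runs directly.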
717 * @tx: tx request to which the page is added
732 struct sdma_txreq *tx, in sdma_txadd_page() argument
740 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_page()
741 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, in sdma_txadd_page()
755 __sdma_txclean(dd, tx); in sdma_txadd_page()
760 dd, SDMA_MAP_PAGE, tx, addr, len); in sdma_txadd_page()
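The SDMA_MAP_PAGE tag in the tail call above records that this fragment was mapped with dma_map_page(), so progress processing knows how to unmap it; on a mapping failure the routine has already cleaned the tx via the __sdma_txclean() match above. A call-site sketch, assuming the (dd, tx, page, offset, len) parameter order of this version's kernel-doc:

	/* hedged call-site sketch: append one page fragment to the tx */
	rval = sdma_txadd_page(dd, tx, page, offset, len);
	if (rval)
		return rval;	/* tx already cleaned; just propagate */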
766 * @tx: sdma_txreq to which the dma address is added
781 struct sdma_txreq *tx, in sdma_txadd_daddr() argument
787 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_daddr()
788 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE, in sdma_txadd_daddr()
794 return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len); in sdma_txadd_daddr()
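By contrast, sdma_txadd_daddr() tags the descriptor SDMA_MAP_NONE: the caller passes an address it already dma-mapped, and the unmap pass skips such descriptors, so the caller keeps ownership of the mapping. A sketch under that assumption:

	/* addr came from a mapping the caller made earlier, e.g. an
	 * external dma_map_single(); completion will not unmap it */
	rval = sdma_txadd_daddr(dd, tx, addr, len);
	if (rval)
		return rval;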
800 * @tx: sdma_txreq to which the kernel virtual address is added
815 struct sdma_txreq *tx, in sdma_txadd_kvaddr() argument
822 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_kvaddr()
823 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE, in sdma_txadd_kvaddr()
836 __sdma_txclean(dd, tx); in sdma_txadd_kvaddr()
841 dd, SDMA_MAP_SINGLE, tx, addr, len); in sdma_txadd_kvaddr()
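sdma_txadd_kvaddr() completes the trio: it dma-maps the kernel virtual address itself with dma_map_single() (hence the SDMA_MAP_SINGLE tag), and progress processing unmaps it later; this is the variant used in the lifecycle sketch near the top. A call-site sketch:

	/* hedged call-site sketch: append a kernel-virtual buffer */
	rval = sdma_txadd_kvaddr(dd, tx, kvaddr, len);
	if (rval)
		return rval;	/* mapping failed; tx already cleaned */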
848 struct sdma_txreq *tx,
888 * @tx: txreq for which we need to check descriptor availability
899 struct sdma_txreq *tx) in sdma_progress() argument
903 if (tx->num_desc > sde->desc_avail) in sdma_progress()
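The fragment at line 903 is the core of the check: the tx still will not fit if it needs more descriptors than the engine has free. A usage sketch for the retry path, assuming the (sde, seq, tx) parameter order and that seq is a seqcount snapshot of the engine's head lock taken before the failed submit (both assumptions of this sketch):

	/*
	 * Hedged sketch: after a submit fails for lack of ring space,
	 * decide between retrying and parking the request to sleep.
	 */
	if (sdma_progress(sde, seq, tx)) {
		/* the ring drained since the snapshot; the tx now fits */
		goto retry;
	}
	/* otherwise queue on the engine's wait list */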