/src/sys/ofed/drivers/infiniband/ulp/ipoib/
ipoib_ib.c
    276  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max)    in ipoib_dma_map_tx() argument
    278  struct mbuf *mb = tx_req->mb;    in ipoib_dma_map_tx()
    279  u64 *mapping = tx_req->mapping;    in ipoib_dma_map_tx()
    295  tx_req->mb = mb = m_defrag(mb, M_NOWAIT);    in ipoib_dma_map_tx()
    322  void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)    in ipoib_dma_unmap_tx() argument
    324  struct mbuf *mb = tx_req->mb;    in ipoib_dma_unmap_tx()
    325  u64 *mapping = tx_req->mapping;    in ipoib_dma_unmap_tx()
    337  struct ipoib_tx_buf *tx_req;    in ipoib_ib_handle_tx_wc() local
    348  tx_req = &priv->tx_ring[wr_id];    in ipoib_ib_handle_tx_wc()
    350  ipoib_dma_unmap_tx(priv->ca, tx_req);    in ipoib_ib_handle_tx_wc()
    [all …]
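
The ipoib_ib.c hits pair a map step, ipoib_dma_map_tx(), which walks the mbuf chain and records one DMA address per segment in tx_req->mapping[] (falling back to m_defrag() when the chain is too long), with an unmap step, ipoib_dma_unmap_tx(), driven from the completion handler ipoib_ib_handle_tx_wc(). Below is a minimal userspace sketch of that map/unmap pairing; the seg/tx_buf types and the dma_map_segment()/dma_unmap_segment() stubs are hypothetical stand-ins for the mbuf chain and the real ib_dma_* calls.

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_SEGS 8                      /* stand-in for the device SGE limit */

    struct seg    { const void *ptr; size_t len; struct seg *next; };  /* stand-in for an mbuf chain */
    struct tx_buf { struct seg *chain; uint64_t mapping[MAX_SEGS]; int nsegs; };

    /* hypothetical stubs standing in for ib_dma_map_single()/ib_dma_unmap_single() */
    static uint64_t dma_map_segment(const void *p, size_t len)   { (void)len; return (uint64_t)(uintptr_t)p; }
    static void     dma_unmap_segment(uint64_t addr, size_t len) { (void)addr; (void)len; }

    /* map: one DMA address per segment; fail past the limit so the caller can defragment */
    static int tx_buf_map(struct tx_buf *tx)
    {
        int i = 0;
        for (struct seg *s = tx->chain; s != NULL; s = s->next) {
            if (i == MAX_SEGS)
                goto unwind;                /* too many segments: undo and report */
            tx->mapping[i++] = dma_map_segment(s->ptr, s->len);
        }
        tx->nsegs = i;
        return 0;
    unwind:
        while (i-- > 0)
            dma_unmap_segment(tx->mapping[i], 0);
        return -1;
    }

    /* unmap: called from the send-completion path, mirroring tx_buf_map() */
    static void tx_buf_unmap(struct tx_buf *tx)
    {
        struct seg *s = tx->chain;
        for (int i = 0; i < tx->nsegs && s != NULL; i++, s = s->next)
            dma_unmap_segment(tx->mapping[i], s->len);
    }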
|
ipoib_cm.c
    598  struct ipoib_cm_tx_buf *tx_req,    in post_send() argument
    602  struct mbuf *mb = tx_req->mb;    in post_send()
    603  u64 *mapping = tx_req->mapping;    in post_send()
    620  struct ipoib_cm_tx_buf *tx_req;    in ipoib_cm_send() local
    648  tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];    in ipoib_cm_send()
    649  tx_req->mb = mb;    in ipoib_cm_send()
    650  if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,    in ipoib_cm_send()
    653  if (tx_req->mb)    in ipoib_cm_send()
    654  m_freem(tx_req->mb);    in ipoib_cm_send()
    658  if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {    in ipoib_cm_send()
    [all …]
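
ipoib_cm_send() selects the ring slot with tx->tx_head & (ipoib_sendq_size - 1); that idiom relies on the send-queue size being a power of two, so the free-running head counter can be masked onto the array without a modulo. A small sketch of the idiom, with a hypothetical ring type and a full check based on the head/tail distance:

    #include <assert.h>
    #include <stdint.h>

    #define SENDQ_SIZE 64u                              /* must be a power of two */

    struct slot { void *pkt; };
    struct ring {
        struct slot slots[SENDQ_SIZE];
        unsigned int head;                              /* producer counter, never masked */
        unsigned int tail;                              /* consumer counter, never masked */
    };

    /* claim the next slot; the mask maps the free-running counter onto the array */
    static struct slot *ring_claim(struct ring *r, void *pkt)
    {
        assert((SENDQ_SIZE & (SENDQ_SIZE - 1)) == 0);   /* power-of-two check */
        if (r->head - r->tail == SENDQ_SIZE)
            return NULL;                                /* ring full */
        struct slot *s = &r->slots[r->head & (SENDQ_SIZE - 1)];
        s->pkt = pkt;
        r->head++;                                      /* advance only after the slot is filled */
        return s;
    }

    /* completion side: release the oldest in-flight slot */
    static void ring_complete(struct ring *r)
    {
        struct slot *s = &r->slots[r->tail & (SENDQ_SIZE - 1)];
        s->pkt = NULL;
        r->tail++;
    }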
|
ipoib.h
    523  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req, int max);
    524  void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
|
/src/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_tx.c
    68   struct sdp_buf *tx_req;    in sdp_post_send() local
    123  tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)];    in sdp_post_send()
    124  tx_req->mb = mb;    in sdp_post_send()
    133  tx_req->mapping[i] = addr;    in sdp_post_send()
    144  if (unlikely(tx_req->mb->m_flags & M_URG))    in sdp_post_send()
    152  sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE);    in sdp_post_send()
    155  m_freem(tx_req->mb);    in sdp_post_send()
    170  struct sdp_buf *tx_req;    in sdp_send_completion() local
    181  tx_req = &tx_ring->buffer[mseq & (SDP_TX_SIZE - 1)];    in sdp_send_completion()
    182  mb = tx_req->mb;    in sdp_send_completion()
    [all …]
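
In sdp_tx.c both sdp_post_send() and sdp_send_completion() index the same buffer array with mseq & (SDP_TX_SIZE - 1): the message sequence number posted with the send comes back with the completion, so the completion path can find the matching sdp_buf and free its mbuf without extra bookkeeping. A hedged sketch of that lookup, with hypothetical tx_slot/tx_ring types:

    #include <stdint.h>
    #include <stdlib.h>

    #define TX_SIZE 256u                     /* ring size, a power of two */

    struct tx_slot { void *payload; uint64_t mapping[4]; };

    struct tx_ring {
        struct tx_slot buffer[TX_SIZE];
        uint32_t next_mseq;                  /* monotonically increasing message sequence */
    };

    /* post: stash the payload in the slot addressed by its sequence number */
    static uint32_t tx_post(struct tx_ring *ring, void *payload)
    {
        uint32_t mseq = ring->next_mseq++;
        struct tx_slot *slot = &ring->buffer[mseq & (TX_SIZE - 1)];
        slot->payload = payload;
        /* ...fill slot->mapping[] and hand mseq to the hardware here... */
        return mseq;
    }

    /* completion: the hardware reports mseq back, which addresses the same slot */
    static void tx_complete(struct tx_ring *ring, uint32_t mseq)
    {
        struct tx_slot *slot = &ring->buffer[mseq & (TX_SIZE - 1)];
        free(slot->payload);                 /* stands in for m_freem() on the stored mbuf */
        slot->payload = NULL;
    }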
|
/src/sys/contrib/dev/rtw89/
usb.c
    324  struct rtw89_core_tx_request *tx_req)    in rtw89_usb_tx_write_fwcmd() argument
    326  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_usb_tx_write_fwcmd()
    328  struct sk_buff *skb = tx_req->skb;    in rtw89_usb_tx_write_fwcmd()
    351  tx_req->skb = skb512;    in rtw89_usb_tx_write_fwcmd()
    366  struct rtw89_core_tx_request *tx_req)    in rtw89_usb_ops_tx_write() argument
    368  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_usb_ops_tx_write()
    371  struct sk_buff *skb = tx_req->skb;    in rtw89_usb_ops_tx_write()
    376  tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&    in rtw89_usb_ops_tx_write()
    378  tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {    in rtw89_usb_ops_tx_write()
    380  desc_info->ch_dma, tx_req->tx_type);    in rtw89_usb_ops_tx_write()
    [all …]
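
The rtw89_usb_ops_tx_write() hits show a consistency check between the DMA channel and the request type: the write is rejected when exactly one of "the channel is the firmware-command channel" and "tx_type is RTW89_CORE_TX_TYPE_FWCMD" holds. A minimal sketch of that either-both-or-neither check, with hypothetical enum values:

    #include <stdio.h>

    enum ch_dma  { CH_DATA, CH_FWCMD };          /* hypothetical channel ids */
    enum tx_type { TX_DATA, TX_FWCMD };          /* hypothetical request types */

    /* reject requests where channel and type disagree about "firmware command" */
    static int check_fwcmd_consistency(enum ch_dma ch, enum tx_type type)
    {
        int ch_is_fwcmd   = (ch == CH_FWCMD);
        int type_is_fwcmd = (type == TX_FWCMD);

        if (ch_is_fwcmd != type_is_fwcmd) {
            fprintf(stderr, "fwcmd mismatch: ch_dma=%d tx_type=%d\n", ch, type);
            return -1;
        }
        return 0;
    }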
|
core.c
    579  struct rtw89_core_tx_request *tx_req,    in rtw89_core_tx_update_ampdu_info() argument
    582  struct rtw89_sta_link *rtwsta_link = tx_req->rtwsta_link;    in rtw89_core_tx_update_ampdu_info()
    583  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_core_tx_update_ampdu_info()
    585  struct sk_buff *skb = tx_req->skb;    in rtw89_core_tx_update_ampdu_info()
    622  struct rtw89_core_tx_request *tx_req)    in rtw89_core_tx_update_sec_key() argument
    629  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_core_tx_update_sec_key()
    630  struct sk_buff *skb = tx_req->skb;    in rtw89_core_tx_update_sec_key()
    690  struct rtw89_core_tx_request *tx_req,    in rtw89_core_get_mgmt_rate() argument
    693  struct sk_buff *skb = tx_req->skb;    in rtw89_core_get_mgmt_rate()
    694  struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;    in rtw89_core_get_mgmt_rate()
    [all …]
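
core.c shows a family of rtw89_core_tx_update_*() helpers that all take the same tx_req, pull out the pieces they need (skb, station or vif link, desc_info), and fill in one slice of the descriptor. A hedged sketch of that pattern with hypothetical, heavily simplified fields, showing why each helper only needs the request pointer:

    #include <stdbool.h>
    #include <stdint.h>

    /* hypothetical, heavily simplified request/descriptor shapes */
    struct desc_info { bool ampdu_en; uint8_t ampdu_num; uint8_t sec_type; uint16_t rate; };
    struct tx_request {
        struct desc_info desc_info;
        const void *skb;                    /* frame payload */
        const void *sta_link;               /* peer state, may be NULL */
    };

    static void tx_update_ampdu_info(struct tx_request *req)
    {
        struct desc_info *d = &req->desc_info;
        d->ampdu_en  = (req->sta_link != NULL);     /* aggregate only when a peer exists */
        d->ampdu_num = d->ampdu_en ? 64 : 0;
    }

    static void tx_update_sec_key(struct tx_request *req)
    {
        req->desc_info.sec_type = 0;                /* no cipher in this sketch */
    }

    /* the core path just chains the updaters over one request */
    static void tx_update_desc_info(struct tx_request *req)
    {
        tx_update_ampdu_info(req);
        tx_update_sec_key(req);
    }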
|
txrx.h
    723  rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)    in rtw89_core_get_qsel_mgmt() argument
    725  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_core_get_qsel_mgmt()
    726  struct rtw89_vif_link *rtwvif_link = tx_req->rtwvif_link;    in rtw89_core_get_qsel_mgmt()
|
mac.h
    1637  struct rtw89_core_tx_request *tx_req)    in rtw89_tx_rpt_init() argument
    1644  tx_req->desc_info.report = true;    in rtw89_tx_rpt_init()
    1646  tx_req->desc_info.sn = atomic_inc_return(&tx_rpt->sn) &    in rtw89_tx_rpt_init()
    1648  tx_req->desc_info.tx_cnt_lmt_en = true;    in rtw89_tx_rpt_init()
    1649  tx_req->desc_info.tx_cnt_lmt = 8;    in rtw89_tx_rpt_init()
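
rtw89_tx_rpt_init() tags a request for a TX report: it sets report, draws a sequence number from an atomic counter and masks it to the width of the descriptor field, and bounds retries through tx_cnt_lmt. A sketch of that numbering scheme, assuming a hypothetical 8-bit sequence field:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SN_MASK 0xffu                     /* hypothetical width of the descriptor sn field */

    struct desc_info { bool report; uint8_t sn; bool tx_cnt_lmt_en; uint8_t tx_cnt_lmt; };

    static atomic_uint tx_rpt_sn;             /* shared counter; the mask makes it wrap harmlessly */

    /* mark a descriptor so the firmware reports the fate of this frame */
    static void tx_rpt_init(struct desc_info *d)
    {
        d->report        = true;
        /* "+ 1" mirrors atomic_inc_return(), which yields the incremented value */
        d->sn            = (uint8_t)((atomic_fetch_add(&tx_rpt_sn, 1) + 1) & SN_MASK);
        d->tx_cnt_lmt_en = true;
        d->tx_cnt_lmt    = 8;                 /* bound hardware retries for reported frames */
    }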
|
pci.c
    1494  struct rtw89_core_tx_request *tx_req)    in rtw89_pci_txwd_submit() argument
    1498  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_pci_txwd_submit()
    1502  struct sk_buff *skb = tx_req->skb;    in rtw89_pci_txwd_submit()
    1559  struct rtw89_core_tx_request *tx_req)    in rtw89_pci_fwcmd_submit() argument
    1563  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;    in rtw89_pci_fwcmd_submit()
    1567  struct sk_buff *skb = tx_req->skb;    in rtw89_pci_fwcmd_submit()
    1598  struct rtw89_core_tx_request *tx_req)    in rtw89_pci_txbd_submit() argument
    1609  return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);    in rtw89_pci_txbd_submit()
    1618  ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);    in rtw89_pci_txbd_submit()
    1642  static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,    in rtw89_pci_tx_write() argument
    [all …]
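
rtw89_pci_txbd_submit() branches on the request: firmware commands go straight to rtw89_pci_fwcmd_submit(), while ordinary frames first get a TX work descriptor through rtw89_pci_txwd_submit(). A sketch of that dispatch shape, with hypothetical types and stubbed-out submit paths:

    #include <stdbool.h>

    struct tx_ring;                                   /* opaque in this sketch */
    struct tx_request { bool is_fwcmd; const void *skb; };

    /* hypothetical stand-ins for the two real submit paths */
    static int fwcmd_submit(struct tx_ring *ring, const struct tx_request *req) { (void)ring; (void)req; return 0; }
    static int txwd_submit(struct tx_ring *ring, const struct tx_request *req)  { (void)ring; (void)req; return 0; }

    /* one entry point, two submit paths, selected by the request type */
    static int txbd_submit(struct tx_ring *ring, const struct tx_request *req)
    {
        if (req->is_fwcmd)
            return fwcmd_submit(ring, req);   /* no work descriptor for firmware commands */

        return txwd_submit(ring, req);        /* data frames go through a TX work descriptor */
    }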
|
core.h
    3678  int (*tx_write)(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req);
    6357  struct rtw89_core_tx_request *tx_req)    in rtw89_hci_tx_write() argument
    6359  return rtwdev->hci.ops->tx_write(rtwdev, tx_req);    in rtw89_hci_tx_write()
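
core.h declares tx_write as a function pointer in the HCI ops table, and rtw89_hci_tx_write() is a one-line wrapper that dispatches through it, which is how the same core TX path drives the PCI and USB back ends listed above. A sketch of that indirection with hypothetical simplified types, including a tiny usage example:

    #include <stdio.h>

    struct dev;                                    /* forward declaration */
    struct tx_request { const void *skb; };

    /* per-bus operations; each back end (PCI, USB, ...) fills in its own table */
    struct hci_ops {
        int (*tx_write)(struct dev *d, struct tx_request *req);
    };

    struct dev { const struct hci_ops *ops; };

    /* thin wrapper used by the bus-independent core code */
    static int hci_tx_write(struct dev *d, struct tx_request *req)
    {
        return d->ops->tx_write(d, req);
    }

    /* example back end */
    static int pci_tx_write(struct dev *d, struct tx_request *req)
    {
        (void)d; (void)req;
        printf("pci tx_write\n");
        return 0;
    }

    static const struct hci_ops pci_ops = { .tx_write = pci_tx_write };

    int main(void)
    {
        struct dev d = { .ops = &pci_ops };
        struct tx_request req = { .skb = NULL };
        return hci_tx_write(&d, &req);
    }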
|