/linux/drivers/infiniband/sw/rxe/
rxe_req.c
    13:  static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ...
    retry_first_write_send()  (wqe argument, line 17: "... struct rxe_send_wqe *wqe, int npsn)"):
        22:  int to_send = (wqe->dma.resid > qp->mtu) ?
        23:          qp->mtu : wqe->dma.resid;
        25:  qp->req.opcode = next_opcode(qp, wqe,
        26:          wqe->wr.opcode);
        28:  if (wqe->wr.send_flags & IB_SEND_INLINE) {
        29:          wqe->dma.resid -= to_send;
        30:          wqe->dma.sge_offset += to_send;
        32:  advance_dma_data(&wqe ...
    wqe as argument:
       205:  rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
       353:  next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe, u32 opcode)
       383:  check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
       415:  init_req_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, int opcode, u32 payload, struct rxe_pkt_info *pkt)
       499:  finish_packet(struct rxe_qp *qp, struct rxe_av *av, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 payload)
       542:  update_wqe_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt)
       556:  update_wqe_psn(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt, u32 payload)
       593:  rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
    wqe as local (struct rxe_send_wqe *wqe): req_retry() (39), req_check_sq_drain_done() (120),
        req_next_wqe() (176), rxe_requester() (644)
    [all ...]
rxe_mw.c
    rxe_check_bind_mw()  (wqe argument, line 50: "static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ..."):
        83:  if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
       117:  if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
       123:  if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
       124:          ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
    rxe_do_bind_mw()  (wqe argument, line 135: "static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ..."):
       138:  u32 key = wqe->wr.wr.mw.rkey & 0xff;
       143:  mw->addr = wqe->wr.wr.mw.addr;
       144:  mw->length = wqe ...
    wqe as argument:
       164:  rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
    [all ...]
/linux/drivers/infiniband/sw/rdmavt/
trace_tx.h
    45:  "[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
    49:  TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
    50:  TP_ARGS(qp, wqe, wr_num_sge),
    54:  __field(struct rvt_swqe *, wqe)
    73:  __entry->wqe = wqe;
    74:  __entry->wr_id = wqe->wr.wr_id;
    77:  __entry->psn = wqe->psn;
    78:  __entry->lpsn = wqe->lpsn;
    79:  __entry->length = wqe ...
    [all ...]
qp.c
    rvt_clear_mr_refs():
       591:  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);   [wqe local]
       593:  rvt_put_qp_swqe(qp, wqe);
    rvt_swqe_has_lkey()  (wqe argument):
       616:  * @wqe: the send wqe
       621:  static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
       625:  for (i = 0; i < wqe->wr.num_sge; i++) {
       626:          struct rvt_sge *sge = &wqe->sg_list[i];
    rvt_qp_sends_has_lkey():
       644:  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);   [wqe local]
       646:  if (rvt_swqe_has_lkey(wqe, lkey))
    wqe as argument:
      2266:  init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
      2777:  rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status)
    wqe as local (struct rvt_swqe *wqe): free_ud_wq_attr() (979), alloc_ud_wq_attr() (999),
        rvt_post_one_wr() (1963), rvt_ruc_loopback() (2906)
    wqe as local (struct rvt_rwqe *wqe): rvt_post_recv() (1796), rvt_post_srq_recv() (2211),
        rvt_get_rwqe() (2349)
    [all ...]
rc.c
    rvt_restart_sge()  (wqe argument):
       155:  * rvt_restart_sge - rewind the sge state for a wqe
       157:  * @wqe: the wqe to rewind
       158:  * @len: the data length from the start of the wqe in bytes
       162:  u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len)
       164:  ss->sge = wqe->sg_list[0];
       165:  ss->sg_list = wqe->sg_list + 1;
       166:  ss->num_sge = wqe->wr.num_sge;
       167:  ss->total_len = wqe->length;
       169:  return wqe ...
    [all ...]
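The rvt_restart_sge() matches above show the rdmavt helper that rewinds a send wqe's scatter/gather state before a retransmit. As a rough illustration of how a requester might call it when restarting from a given PSN (a sketch only: delta_psn() and qp->pmtu follow the hfi1/qib style and are assumptions, not part of the listing above):

    /*
     * Sketch: rewind a send WQE before retransmission using rdmavt's
     * rvt_restart_sge() shown above.  delta_psn() and qp->pmtu are
     * hfi1/qib-style names used here only for illustration.
     */
    static void example_restart_from_psn(struct rvt_qp *qp,
                                         struct rvt_swqe *wqe, u32 psn)
    {
            /* Payload already sent: packets before 'psn' times the path MTU. */
            u32 done = delta_psn(psn, wqe->psn) * qp->pmtu;

            /*
             * Point qp->s_sge back into the WQE's sg_list, skip the bytes
             * already sent, and remember how much payload is still left.
             */
            qp->s_len = rvt_restart_sge(&qp->s_sge, wqe, done);
    }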
/linux/drivers/infiniband/sw/siw/
siw_qp_tx.c
    siw_try_1seg():
        51:  struct siw_wqe *wqe = &c_tx->wqe_active;   [wqe local]
        52:  struct siw_sge *sge = &wqe->sqe.sge[0];
        55:  if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
        61:  if (tx_flags(wqe) & SIW_WQE_INLINE) {
        62:          memcpy(paddr, &wqe->sqe.sge[1], bytes);
        64:  struct siw_mem *mem = wqe->mem[0];
    siw_qp_prepare_tx():
       119:  struct siw_wqe *wqe = &c_tx->wqe_active;   [wqe local]
       123:  switch (tx_type(wqe)) {
       135:  c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
       137:  cpu_to_be64(wqe ...
    wqe as argument:
       706:  siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
       763:  siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe, enum ib_access_flags perms)
       793:  siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
       978:  siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
    wqe as local: siw_tx_hdt() (440, &c_tx->wqe_active), siw_qp_sq_process() (1025, tx_wqe(qp))
    [all ...]
siw_qp.c
    siw_qp_mpa_rts():
       238:  struct siw_wqe *wqe = tx_wqe(qp);   [wqe local]
       244:  if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
       248:  memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
       250:  wqe->wr_status = SIW_WR_QUEUED;
       251:  wqe->sqe.flags = 0;
       252:  wqe->sqe.num_sge = 1;
       253:  wqe->sqe.sge[0].length = 0;
       254:  wqe->sqe.sge[0].laddr = 0;
       255:  wqe ...
    wqe as local (struct siw_wqe *wqe = tx_wqe(qp)): siw_send_terminate() (438),
        siw_activate_tx_from_sq() (855), siw_activate_tx() (951), siw_sq_flush() (1162)
    wqe as local: siw_rq_flush() (1237, &qp->rx_untagged.wqe_active)
    [all ...]
siw_qp_rx.c
    siw_rresp_check_ntoh():
       169:  struct siw_wqe *wqe = &frx->wqe_active;   [wqe local]
       176:  srx->ddp_stag = wqe->sqe.sge[0].lkey;
       177:  srx->ddp_to = wqe->sqe.sge[0].laddr;
       204:  (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
       207:  wqe->processed + srx->fpdu_part_rem, wqe->bytes);
    siw_send_check_ntoh():
       281:  struct siw_wqe *wqe = &frx->wqe_active;   [wqe local]
       301:  if (unlikely(ddp_mo != wqe->processed)) {
       303:  qp_id(rx_qp(srx)), ddp_mo, wqe ...
    wqe as local: siw_rqe_get() (334, = NULL), siw_proc_send() (439),
        siw_proc_write() (565, rx_wqe(frx)), siw_orqe_start_rx() (742, = NULL),
        siw_proc_rresp() (786, rx_wqe(frx)), siw_rdmap_complete() (1201, rx_wqe(qp->rx_fpdu))
    [all ...]
/linux/drivers/infiniband/hw/irdma/
uda.c
    irdma_sc_access_ah():
        23:  __le64 *wqe;   [wqe local]
        26:  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
        27:  if (!wqe)
        30:  set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
        41:  set_64bit_val(wqe, 40, ...
        44:  set_64bit_val(wqe, 32, ...
        48:  set_64bit_val(wqe, 56, ...
        51:  set_64bit_val(wqe, 48, ...
        55:  set_64bit_val(wqe, 32, ...
        58:  set_64bit_val(wqe, 4 ...
    wqe as local (__le64 *wqe): irdma_access_mcast_grp() (119)
    [all ...]
uk.c
    irdma_set_fragment()  (wqe argument):
         9:  * irdma_set_fragment - set fragment in wqe
        10:  * @wqe: wqe for setting fragment
        13:  * @valid: The wqe valid
        15:  static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge, ...
        19:  set_64bit_val(wqe, offset, ...
        21:  set_64bit_val(wqe, offset + 8, ...
        26:  set_64bit_val(wqe, offset, 0);
        27:  set_64bit_val(wqe, offset + 8, ...
    wqe as argument:
        33:  * irdma_set_fragment_gen_1 - set fragment in wqe
        39:  irdma_set_fragment_gen_1(__le64 *wqe, u32 offset, struct ib_sge *sge, u8 valid)
       501:  irdma_set_mw_bind_wqe_gen_1(__le64 *wqe, struct irdma_bind_window *op_info)
       518:  irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity)
       563:  irdma_set_mw_bind_wqe(__le64 *wqe, struct irdma_bind_window *op_info)
       580:  irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list, u32 num_sges, u8 polarity)
    wqe as local (__le64 *wqe): irdma_nop_1() (61), irdma_qp_get_next_send_wqe() (158),
        irdma_qp_get_next_recv_wqe() (208), irdma_uk_rdma_write() (236), irdma_uk_rdma_read() (335),
        irdma_uk_send() (410), irdma_uk_inline_rdma_write() (657), irdma_uk_inline_send() (725),
        irdma_uk_stag_local_invalidate() (798), irdma_uk_post_receive() (846), irdma_nop() (1533)
    [all ...]
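The irdma_set_fragment() hits above document how one scatter/gather element is packed into two 64-bit words of the wqe: the buffer address at offset, then a valid bit, fragment length and STag at offset + 8 (the no-SGE case zeroes the address and keeps only the valid bit). A minimal sketch of that packing; the explicit shifts and masks stand in for the driver's FIELD_PREP()-based field macros, and the exact bit positions here are assumptions:

    /*
     * Sketch of the fragment layout written by irdma_set_fragment().
     * set_64bit_val() is the irdma helper seen above; the bit positions
     * below are illustrative placeholders, not the real IRDMAQPSQ_*
     * field definitions.
     */
    static void example_set_fragment(__le64 *wqe, u32 offset,
                                     struct ib_sge *sge, u8 valid)
    {
            if (sge) {
                    /* First quadword: the buffer address. */
                    set_64bit_val(wqe, offset, sge->addr);
                    /* Second quadword: valid bit, fragment length, STag. */
                    set_64bit_val(wqe, offset + 8,
                                  ((u64)valid << 63) |
                                  ((u64)(sge->length & 0x7fffffff) << 32) |
                                  sge->lkey);
            } else {
                    /* No SGE: zero the address, keep only the valid bit. */
                    set_64bit_val(wqe, offset, 0);
                    set_64bit_val(wqe, offset + 8, (u64)valid << 63);
            }
    }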
ctrl.c
    irdma_sc_add_arp_cache_entry():
       176:  * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
       186:  __le64 *wqe;   [wqe local]
       189:  wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
       190:  if (!wqe)
       192:  set_64bit_val(wqe, 8, info->reach_max);
       193:  set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
       202:  set_64bit_val(wqe, 24, hdr);
       205:  16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    irdma_sc_del_arp_cache_entry():
       222:  __le64 *wqe;   [wqe local]
       225:  wqe ...
    wqe as local (__le64 *wqe): irdma_sc_manage_apbvt_entry() (256),
        irdma_sc_manage_qhash_table_entry() (304), irdma_sc_qp_create() (452),
        irdma_sc_qp_modify() (502), irdma_sc_qp_destroy() (572),
        irdma_sc_alloc_local_mac_entry() (764), irdma_sc_add_local_mac_entry() (799),
        irdma_sc_del_local_mac_entry() (837), irdma_sc_alloc_stag() (1059),
        irdma_sc_mr_reg_non_shared() (1123), irdma_sc_dealloc_stag() (1215),
        irdma_sc_mw_alloc() (1256), irdma_sc_mr_fast_register() (1296),
        irdma_sc_gen_rts_ae() (1366), irdma_sc_send_lsmm() (1403), irdma_sc_send_rtt() (1445),
        irdma_sc_gather_stats() (2051), irdma_sc_manage_stats_inst() (2098),
        irdma_sc_set_up_map() (2134), irdma_sc_manage_ws_node() (2177),
        irdma_sc_qp_flush_wqes() (2220), irdma_sc_gen_ae() (2294),
        irdma_sc_qp_upload_context() (2333), irdma_sc_manage_push_page() (2373),
        irdma_sc_suspend_qp() (2412), irdma_sc_resume_qp() (2442), irdma_sc_cq_create() (2519),
        irdma_sc_cq_destroy() (2592), irdma_sc_cq_modify() (2660),
        irdma_sc_cqp_get_next_send_wqe_idx() (3263, = NULL),
        irdma_sc_manage_hmc_pm_func_table() (3460), irdma_sc_commit_fpm_val() (3518),
        irdma_sc_query_fpm_val() (3579), irdma_sc_ceq_create() (3669),
        irdma_sc_ceq_destroy() (3767), irdma_sc_aeq_create() (3932),
        irdma_sc_aeq_destroy() (3971), irdma_sc_ccq_destroy() (4261), cqp_sds_wqe_fill() (4418),
        irdma_sc_static_hmc_pages_allocated() (4541), irdma_sc_query_rdma_features() (4637)
    [all ...]
/linux/drivers/infiniband/hw/hfi1/
rc.c
    hfi1_make_rc_req():
       394:  struct rvt_swqe *wqe;   [wqe local]
       449:  wqe = rvt_get_swqe_ptr(qp, qp->s_last);
       450:  hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
       469:  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
       495:  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
       497:          (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
       506:  if (wqe->wr.opcode == IB_WR_REG_MR ||
       507:          wqe->wr.opcode == IB_WR_LOCAL_INV) {
       517:  if (!(wqe->wr.send_flags &
       521:  wqe ...
    wqe as argument:
      1418:  update_num_rd_atomic(struct rvt_qp *qp, u32 psn, struct rvt_swqe *wqe)
      1825:  do_rc_completion(struct rvt_qp *qp, struct rvt_swqe *wqe, struct hfi1_ibport *ibp)
    wqe as local (struct rvt_swqe *wqe): reset_psn() (1461, rvt_get_swqe_ptr(qp, n)),
        hfi1_restart_rc() (1567, rvt_get_swqe_ptr(qp, qp->s_acked)), reset_sending_psn() (1637),
        hfi1_rc_send_complete() (1697), do_rc_ack() (1970), rdma_seq_err() (2266), rc_rcv_resp() (2310)
    [all ...]
uc.c
    hfi1_make_uc_req():
        26:  struct rvt_swqe *wqe;   [wqe local]
        49:  wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        50:  rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
        72:  wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        88:  if (wqe->wr.opcode == IB_WR_REG_MR ||
        89:          wqe->wr.opcode == IB_WR_LOCAL_INV) {
        97:  if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
        99:          qp, wqe->wr.ex.invalidate_rkey);
       102:  rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
       111:  qp->s_psn = wqe ...
    [all ...]
tid_rdma.h
       214:  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
    trdma_clean_swqe()  (wqe argument):
       219:  * @wqe: the send wqe
       221:  static inline void trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
       223:  if (!wqe->priv)
       225:  __trdma_clean_swqe(qp, wqe);
    wqe in prototypes:
       244:  u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, ...
       247:  u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...
       258:  void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, ...
       261:  bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe);
       265:  hfi1_setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)  [wqe argument]
    [all ...]
tid_rdma.c
    hfi1_qp_priv_init():
       378:  struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);   [wqe local]
       386:  priv->tid_req.e.swqe = wqe;
       387:  wqe->priv = priv;
    hfi1_qp_priv_tid_free():
       416:  struct rvt_swqe *wqe;   [wqe local]
       421:  wqe = rvt_get_swqe_ptr(qp, i);
       422:  kfree(wqe->priv);
       423:  wqe->priv = NULL;
    __trdma_clean_swqe()  (wqe argument):
      1619:  * @wqe: the send wqe
      1621:  void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
    wqe as argument:
      1703:  hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len)
      1787:  hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len)
      2630:  restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp, struct rvt_swqe *wqe)
      3046:  hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 *bth2)
      3210:  hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
      3282:  setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
      3366:  hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len)
      4205:  hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u32 *len)
      4844:  hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe, struct ib_other_headers *ohdr, u32 *bth1, u32 *bth2, u16 fidx)
    wqe as local (struct rvt_swqe *wqe): find_tid_request() (2418),
        hfi1_kern_read_tid_flow_free() (2582), handle_read_kdeth_eflags() (2663),
        hfi1_qp_kern_exp_rcv_clear_all() (3181, rvt_get_swqe_ptr(qp, i)),
        hfi1_rc_rcv_tid_rdma_write_resp() (4044), hfi1_rc_rcv_tid_rdma_ack() (4502),
        hfi1_tid_retry_timeout() (4803), update_tid_tail() (4989), hfi1_make_tid_rdma_pkt() (5013)
    [all ...]
ud.c
    hfi1_make_bth_deth()  (wqe argument):
       224:  static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe, ...
       232:  if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
       233:          ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
       239:  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
       243:  *pkey = hfi1_get_pkey(ibp, rvt_get_swqe_pkey_index(wqe));
       249:  ohdr->bth[1] = cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
       250:  ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
       256:  cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
       257:          rvt_get_swqe_remote_qkey(wqe));
    wqe as argument:
       262:  ... struct rvt_swqe *wqe)   hfi1_make_ud_req_9B()
       333:  hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps, struct rvt_swqe *wqe)
    wqe as local (struct rvt_swqe *wqe): hfi1_make_ud_req() (441)
    [all ...]
trace_rc.h
        80:  struct rvt_swqe *wqe),
        81:  TP_ARGS(qp, aeth, psn, wqe),
        96:  __entry->opcode = wqe->wr.opcode;
        97:  __entry->spsn = wqe->psn;
        98:  __entry->lpsn = wqe->lpsn;
       115:  struct rvt_swqe *wqe),
       116:  TP_ARGS(qp, aeth, psn, wqe)
/linux/drivers/infiniband/hw/mlx5/
umr.c
    mlx5r_umr_post_send()  (wqe argument, line 238: "struct mlx5r_umr_wqe *wqe, bool with_data)"):
       269:  mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
    mlx5r_umr_recover()  (wqe argument, line 285):
       304:  err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe, ...
    mlx5r_umr_post_send_wait()  (wqe argument, line 354):
       360:  err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
       381:  err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe, ...
       402:  err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
    mlx5r_umr_revoke_mr():
       424:  struct mlx5r_umr_wqe wqe = {};   [wqe local]
       429:  wqe ...
    wqe as argument:
       637:  mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev, struct mlx5r_umr_wqe *wqe, struct mlx5_ib_mr *mr, struct ib_sge *sg, unsigned int flags)
       673:  _mlx5r_umr_init_wqe(struct mlx5_ib_mr *mr, struct mlx5r_umr_wqe *wqe, struct ib_sge *sg, unsigned int flags, unsigned int page_shift, bool dd)
    wqe as local (struct mlx5r_umr_wqe wqe = {}): mlx5r_umr_rereg_pd_access() (464),
        _mlx5r_umr_update_mr_pas() (693), mlx5r_umr_update_xlt() (852),
        mlx5r_umr_update_mr_page_shift() (939)
    [all ...]
/linux/drivers/infiniband/hw/cxgb4/
qp.c
    build_rdma_send()  (wqe argument, line 489: "static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, ..."):
       501:  wqe->send.sendop_pkd = cpu_to_be32( ...
       504:  wqe->send.sendop_pkd = cpu_to_be32( ...
       506:  wqe->send.stag_inv = 0;
       510:  wqe->send.sendop_pkd = cpu_to_be32( ...
       513:  wqe->send.sendop_pkd = cpu_to_be32( ...
       515:  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
       521:  wqe->send.r3 = 0;
       522:  wqe->send.r4 = 0;
       527:  ret = build_immd(sq, wqe ...
    wqe as argument:
       556:  build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
       660:  build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
       759:  build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)
       774:  build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)
       820:  build_memreg(struct t4_sq *sq, union t4_wr *wqe, const struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16, bool dsgl_supported)
       884:  build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr, u8 *len16)
      1341:  defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe, u64 wr_id, u8 len16)
      2672:  c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
    wqe as local: post_write_cmpl() (697, union t4_wr *wqe),
        c4iw_post_send() (1086, union t4_wr *wqe = NULL),
        c4iw_post_receive() (1266, union t4_recv_wr *wqe = NULL),
        c4iw_post_srq_recv() (1360, union t4_recv_wr *wqe, lwqe),
        post_terminate() (1563, struct fw_ri_wr *wqe), rdma_fini() (1703, struct fw_ri_wr *wqe),
        rdma_init() (1759, struct fw_ri_wr *wqe)
    [all ...]
/linux/drivers/scsi/lpfc/
lpfc_nvmet.c
    lpfc_nvmet_cmd_template():
        80:  union lpfc_wqe128 *wqe;   [wqe local]
        83:  wqe = &lpfc_tsend_cmd_template;
        84:  memset(wqe, 0, sizeof(union lpfc_wqe128));
        97:  bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        98:  bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        99:  bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
       100:  bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
       101:  bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
       108:  bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
       109:  bf_set(wqe_dbde, &wqe ...
    wqe as local (union lpfc_wqe128 *wqe): lpfc_nvmet_setup_io_context() (1499),
        lpfc_nvmet_prep_ls_wqe() (2595), lpfc_nvmet_prep_fcp_wqe() (2721)
    [all ...]
lpfc_nvme.c
    lpfc_nvme_gen_req():
       399:  union lpfc_wqe128 *wqe;   [wqe local]
       410:  wqe = &genwqe->wqe;
       412:  memset(wqe, 0, sizeof(union lpfc_wqe));
       450:  wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
       451:  wqe->generic.bde.tus.f.bdeSize = first_len;
       452:  wqe->generic.bde.addrLow = bpl[0].addrLow;
       453:  wqe->generic.bde.addrHigh = bpl[0].addrHigh;
       456:  wqe->gen_req.request_payload_len = first_len;
       461:  bf_set(wqe_dfctl, &wqe ...
    wqe as local: __lpfc_nvme_ls_abort() (711, struct lpfc_iocbq *wqe, *next_wqe),
        lpfc_nvme_adj_fcp_sgls() (831, union lpfc_wqe128 *wqe),
        lpfc_nvme_prep_io_cmd() (1216, union lpfc_wqe128 *wqe = &pwqeq->wqe),
        lpfc_nvme_prep_io_dma() (1342, union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe),
        lpfc_get_nvme_buf() (2043, union lpfc_wqe128 *wqe)
    [all ...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls_txrx.c
    mlx5e_ktls_build_static_params()  (wqe argument, line 74: "mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ..."):
        80:  struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
        81:  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        86:  #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
        97:  fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
    mlx5e_ktls_build_progress_params()  (wqe argument, line 117: "mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, ..."):
       123:  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
       128:  #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
       136:  fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);
/linux/drivers/infiniband/hw/mthca/
mthca_srq.c
    wqe_to_link()  (wqe argument):
        92:  static inline int *wqe_to_link(void *wqe)
        94:  return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
    mthca_alloc_srq_buf():
       158:  void *wqe;   [wqe local]
       185:  next = wqe = get_wqe(srq, i);
       188:  *wqe_to_link(wqe) = i + 1;
       191:  *wqe_to_link(wqe) = -1;
       195:  for (scatter = wqe + sizeof (struct mthca_next_seg);
       196:          (void *) scatter < wqe + (1 << srq->wqe_shift);
    mthca_tavor_post_srq_recv():
       495:  void *wqe;   [wqe local]
       504:  wqe ...
    wqe as local (void *wqe): mthca_arbel_post_srq_recv() (588)
    [all ...]
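wqe_to_link() above reuses the immediate-data slot of each SRQ WQE (mthca_next_seg.imm) to store the index of the next free WQE, so unused WQEs form a singly linked free list through the queue buffer, terminated by -1. A commented sketch of that chaining, assuming get_wqe() and srq->max behave as they do in mthca_alloc_srq_buf():

    /*
     * Sketch of the SRQ free-list chaining visible above.  Each free WQE
     * stores the index of the next free WQE where the immediate data
     * normally lives; -1 marks the end of the list.
     */
    static void example_chain_free_wqes(struct mthca_srq *srq)
    {
            int i;

            for (i = 0; i < srq->max; ++i) {
                    void *wqe = get_wqe(srq, i);

                    /* Link to the next WQE, or terminate the free list. */
                    *wqe_to_link(wqe) = (i < srq->max - 1) ? i + 1 : -1;
            }
    }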
mthca_qp.c
    mthca_tavor_post_send():
      1630:  void *wqe;   [wqe local]
      1666:  wqe = get_send_wqe(qp, ind);
      1668:  qp->sq.last = wqe;
      1670:  ((struct mthca_next_seg *) wqe)->nda_op = 0;
      1671:  ((struct mthca_next_seg *) wqe)->ee_nds = 0;
      1672:  ((struct mthca_next_seg *) wqe)->flags = ...
      1680:  ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
      1682:  wqe += sizeof (struct mthca_next_seg);
      1690:  set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, ...
      1692:  wqe ...
    wqe as local (void *wqe): mthca_tavor_post_receive() (1842), mthca_arbel_post_send() (1934),
        mthca_arbel_post_receive() (2172)
    [all ...]
/linux/drivers/infiniband/hw/hns/
hns_roce_trace.h
        49:  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, ...
        51:  TP_ARGS(qpn, idx, wqe, len, id, type),
        55:  __array(u32, wqe, ...
        68:  __entry->wqe[i] = le32_to_cpu(((__le32 *)wqe)[i]);
        71:  TP_printk("%s 0x%lx wqe(0x%x/0x%llx): %s",
        74:  __print_array(__entry->wqe, __entry->len, ...
        79:  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u32 len, u64 id, ...
        81:  TP_ARGS(qpn, idx, wqe, len, id, type));
        83:  TP_PROTO(unsigned long qpn, u32 idx, void *wqe, u3 ...
    [all ...]