Lines Matching defs:wqe (a sketch of the wqe->priv / wqe_to_tid_req() pattern shared by these matches follows the listing)

378 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
386 priv->tid_req.e.swqe = wqe;
387 wqe->priv = priv;
416 struct rvt_swqe *wqe;
421 wqe = rvt_get_swqe_ptr(qp, i);
422 kfree(wqe->priv);
423 wqe->priv = NULL;
1619 * @wqe: the send wqe
1621 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
1623 struct hfi1_swqe_priv *p = wqe->priv;
1703 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
1707 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1711 struct hfi1_swqe_priv *wpriv = wqe->priv;
1748 rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
1750 rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
1787 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
1793 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1800 trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
1801 wqe->lpsn, req);
1829 restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1879 hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
2418 struct rvt_swqe *wqe;
2426 wqe = rvt_get_swqe_ptr(qp, i);
2427 if (cmp_psn(psn, wqe->psn) >= 0 &&
2428 cmp_psn(psn, wqe->lpsn) <= 0) {
2429 if (wqe->wr.opcode == opcode)
2430 req = wqe_to_tid_req(wqe);
2566 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
2567 * == qp->s_head), it would be unsafe to complete the wqe pointed by
2582 struct rvt_swqe *wqe;
2589 wqe = rvt_get_swqe_ptr(qp, n);
2590 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2591 req = wqe_to_tid_req(wqe);
2630 struct rvt_qp *qp, struct rvt_swqe *wqe)
2637 req = wqe_to_tid_req(wqe);
2663 struct rvt_swqe *wqe;
2689 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2693 while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
2699 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2700 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2701 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2702 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2706 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2708 wqe);
2728 wqe = do_rc_completion(qp, wqe, ibp);
2737 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2740 req = wqe_to_tid_req(wqe);
2741 trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
2742 wqe->lpsn, req);
2808 wqe);
3046 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3049 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3056 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3063 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
3064 wqe->psn, wqe->lpsn,
3074 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3106 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3125 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3132 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3133 wqe->lpsn, req);
3135 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3152 wqe = rvt_get_swqe_ptr(qp, i);
3153 } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
3154 req = wqe_to_tid_req(wqe);
3181 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
3186 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
3189 struct hfi1_swqe_priv *priv = wqe->priv;
3210 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
3220 switch (wqe->wr.opcode) {
3282 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
3285 struct hfi1_swqe_priv *priv = wqe->priv;
3306 if (wqe->wr.opcode == IB_WR_RDMA_READ) {
3307 if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
3308 wqe->wr.num_sge)) {
3312 } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
3319 if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
3320 !(wqe->length & ~PAGE_MASK)) {
3329 wqe->wr.opcode = new_opcode;
3331 min_t(u32, remote->max_len, wqe->length);
3333 DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
3335 wqe->lpsn = wqe->psn;
3336 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3339 wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
3341 wqe->lpsn += priv->tid_req.total_segs - 1;
3356 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
3357 wqe->psn, wqe->lpsn,
3366 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3371 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3386 cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
3388 cpu_to_be32(wqe->rdma_wr.rkey);
4044 struct rvt_swqe *wqe;
4081 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4082 if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
4085 req = wqe_to_tid_req(wqe);
4120 (wqe->length - (req->comp_seg * req->seg_len)));
4155 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4156 wqe->lpsn, req);
4161 if (!cmp_psn(psn, wqe->psn)) {
4162 req->r_last_acked = mask_psn(wqe->psn - 1);
4183 wqe = rvt_get_swqe_ptr(qp, i);
4186 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
4205 bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
4209 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4221 hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
4502 struct rvt_swqe *wqe;
4537 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4539 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4542 req = wqe_to_tid_req(wqe);
4543 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4544 wqe->lpsn, req);
4560 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4561 wqe->lpsn, req);
4564 wqe = do_rc_completion(qp, wqe,
4571 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4573 req = wqe_to_tid_req(wqe);
4579 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4580 wqe->lpsn, req);
4587 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
4634 if (delta_psn(ack_psn, wqe->lpsn))
4635 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4636 req = wqe_to_tid_req(wqe);
4704 wqe = rvt_get_swqe_ptr(qp, last_acked);
4705 rptr = wqe_to_tid_req(wqe);
4803 struct rvt_swqe *wqe;
4818 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4819 hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
4822 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4823 req = wqe_to_tid_req(wqe);
4825 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4844 u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
4850 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4989 struct rvt_swqe *wqe;
5001 wqe = rvt_get_swqe_ptr(qp, i);
5002 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
5013 struct rvt_swqe *wqe;
5069 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5070 req = wqe_to_tid_req(wqe);
5071 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
5072 wqe->lpsn, req);
5076 priv->tid_ss.sge = wqe->sg_list[0];
5077 priv->tid_ss.sg_list = wqe->sg_list + 1;
5078 priv->tid_ss.num_sge = wqe->wr.num_sge;
5079 priv->tid_ss.total_len = wqe->length;
5082 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
5102 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5103 req = wqe_to_tid_req(wqe);
5104 len = wqe->length;
5109 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
5110 wqe->psn, wqe->lpsn, req);
5111 last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
5137 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
5138 req = wqe_to_tid_req(wqe);
5141 wqe = rvt_get_swqe_ptr(qp,
5144 req = wqe_to_tid_req(wqe);
5146 hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
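
Nearly every match above rides on the same two links: queue setup stores a struct hfi1_swqe_priv in wqe->priv and a back-pointer in priv->tid_req.e.swqe (lines 386-387), and the wqe_to_tid_req() accessor recovers the embedded struct tid_rdma_request from that pointer everywhere else. A minimal sketch of that relationship, assuming the hfi1 driver's definitions, with struct bodies trimmed to only the fields visible in the listing:

#include <linux/types.h>
#include <rdma/rdmavt_qp.h>	/* struct rvt_swqe and its void *priv */

struct tid_rdma_request {
	union {
		struct rvt_swqe *swqe;	/* back-pointer set at line 386 */
		/* ... (other members elided in this sketch) */
	} e;
	u32 seg_len;			/* segment length (line 3333) */
	u32 total_segs;			/* segment count (line 3341) */
	u32 comp_seg;			/* completed segments (line 4120) */
	/* ... */
};

struct hfi1_swqe_priv {
	struct tid_rdma_request tid_req;
	/* ... */
};

/*
 * Stand-in for the accessor behind every wqe_to_tid_req(wqe) call in
 * the listing: it resolves to the tid_req embedded in the per-WQE
 * private data wired up at lines 386-387
 * (priv->tid_req.e.swqe = wqe; wqe->priv = priv;).
 */
static inline struct tid_rdma_request *wqe_to_tid_req(struct rvt_swqe *wqe)
{
	return &((struct hfi1_swqe_priv *)wqe->priv)->tid_req;
}

This pairing is also what keeps teardown symmetric: kfree(wqe->priv) followed by wqe->priv = NULL (lines 422-423), and the __trdma_clean_swqe() helper (line 1621), release the request state together with its WQE.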