Searched refs: send_cq (Results 1 – 25 of 57), sorted by relevance

/linux/drivers/infiniband/core/
uverbs_std_types_qp.c
95 struct ib_cq *send_cq = NULL; in UVERBS_HANDLER() local
166 /* send_cq is optional */ in UVERBS_HANDLER()
168 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
170 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
171 return PTR_ERR(send_cq); in UVERBS_HANDLER()
175 send_cq = uverbs_attr_get_obj(attrs, in UVERBS_HANDLER()
177 if (IS_ERR(send_cq)) in UVERBS_HANDLER()
178 return PTR_ERR(send_cq); in UVERBS_HANDLER()
234 attr.send_cq = send_cq; in UVERBS_HANDLER()
[all...]
verbs.c
1190 qp->send_cq = qp->recv_cq = NULL; in create_xrc_qp_user()
1244 qp->send_cq = attr->send_cq; in create_qp()
1255 * TODO: The mlx4 internally overwrites send_cq and recv_cq. in create_qp()
1258 qp->send_cq = attr->send_cq; in create_qp()
1318 if (qp->send_cq) in ib_qp_usecnt_inc()
1319 atomic_inc(&qp->send_cq->usecnt); in ib_qp_usecnt_inc()
1337 if (qp->send_cq) in ib_qp_usecnt_dec()
1338 atomic_dec(&qp->send_cq->usecnt); in ib_qp_usecnt_dec()
[all...]
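
For orientation, a minimal sketch of the pattern the verbs.c hits above illustrate: at create time the core copies the caller's send_cq from the init attributes into the ib_qp, and a use count on the CQ keeps it alive while QPs still reference it. The helper names below are made up for illustration; the real logic lives in create_qp() and ib_qp_usecnt_inc()/ib_qp_usecnt_dec().

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

static void example_qp_take_cqs(struct ib_qp *qp,
				struct ib_qp_init_attr *attr)
{
	qp->send_cq = attr->send_cq;	/* may legitimately be NULL, e.g. XRC targets */
	qp->recv_cq = attr->recv_cq;

	if (qp->send_cq)
		atomic_inc(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_inc(&qp->recv_cq->usecnt);
}

static void example_qp_drop_cqs(struct ib_qp *qp)
{
	if (qp->send_cq)
		atomic_dec(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_dec(&qp->recv_cq->usecnt);
}
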
/linux/drivers/infiniband/hw/mana/
qp.c
270 struct mana_ib_cq *send_cq = in mana_ib_create_qp_raw() local
271 container_of(attr->send_cq, struct mana_ib_cq, ibcq); in mana_ib_create_qp_raw()
340 cq_spec.gdma_region = send_cq->queue.gdma_region; in mana_ib_create_qp_raw()
341 cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; in mana_ib_create_qp_raw()
343 eq_vec = send_cq->comp_vector; in mana_ib_create_qp_raw()
358 send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION; in mana_ib_create_qp_raw()
361 send_cq->queue.id = cq_spec.queue_index; in mana_ib_create_qp_raw()
364 err = mana_ib_install_cq_cb(mdev, send_cq); in mana_ib_create_qp_raw()
370 qp->qp_handle, qp->raw_sq.id, send_cq->queue.id); in mana_ib_create_qp_raw()
373 resp.cqid = send_cq in mana_ib_create_qp_raw()
605 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); mana_add_qp_to_cqs() local
620 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); mana_remove_qp_from_cqs() local
[all...]
main.c
1023 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_ib_gd_create_rc_qp() local
1035 req.send_cq_handle = send_cq->cq_handle; in mana_ib_gd_create_rc_qp()
1082 struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq); in mana_ib_gd_create_ud_qp() local
1094 req.send_cq_handle = send_cq->cq_handle; in mana_ib_gd_create_ud_qp()
/linux/drivers/infiniband/ulp/ipoib/
ipoib_verbs.c
187 priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL, in ipoib_transport_dev_init()
189 if (IS_ERR(priv->send_cq)) { in ipoib_transport_dev_init()
197 init_attr.send_cq = priv->send_cq; in ipoib_transport_dev_init()
218 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) in ipoib_transport_dev_init()
244 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_init()
266 ib_destroy_cq(priv->send_cq); in ipoib_transport_dev_cleanup()
ipoib_ib.c
440 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in poll_tx()
508 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); in ipoib_tx_poll()
520 if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_tx_poll()
674 if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP | in ipoib_send()
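
The ipoib hits above sketch the usual consumer-side flow: create a dedicated send CQ with a TX completion handler, wire it into ib_qp_init_attr.send_cq, arm it with ib_req_notify_cq(), and drain it with ib_poll_cq() from the handler or a poll loop. A condensed sketch of that flow (the example_ function name, sizes, and the -EIO error code are assumptions; error handling is trimmed):

#include <linux/err.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_setup_tx_cq(struct ib_device *ca, int tx_ring_size,
			       ib_comp_handler tx_handler, void *ctx,
			       struct ib_qp_init_attr *init_attr,
			       struct ib_cq **send_cq_out)
{
	struct ib_cq_init_attr cq_attr = { .cqe = tx_ring_size };
	struct ib_cq *send_cq;

	send_cq = ib_create_cq(ca, tx_handler, NULL, ctx, &cq_attr);
	if (IS_ERR(send_cq))
		return PTR_ERR(send_cq);

	init_attr->send_cq = send_cq;

	/* arm the CQ so tx_handler fires on the next send completion */
	if (ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP)) {
		ib_destroy_cq(send_cq);
		return -EIO;
	}

	*send_cq_out = send_cq;
	return 0;
}
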
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
735 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); in __mthca_modify_qp()
837 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
1164 struct mthca_cq *send_cq, in mthca_alloc_qp_common() argument
1293 struct mthca_cq *send_cq, in mthca_alloc_qp() argument
1321 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, in mthca_alloc_qp()
1336 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) in mthca_lock_cqs() argument
1337 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mthca_lock_cqs()
1339 if (send_cq == recv_cq) { in mthca_lock_cqs()
1340 spin_lock_irq(&send_cq->lock); in mthca_lock_cqs()
1351 mthca_unlock_cqs(struct mthca_cq * send_cq,struct mthca_cq * recv_cq) mthca_unlock_cqs() argument
1368 mthca_alloc_sqp(struct mthca_dev * dev,struct mthca_pd * pd,struct mthca_cq * send_cq,struct mthca_cq * recv_cq,enum ib_sig_type send_policy,struct ib_qp_cap * cap,int qpn,u32 port,struct mthca_qp * qp,struct ib_udata * udata) mthca_alloc_sqp() argument
1448 struct mthca_cq *send_cq; mthca_free_qp() local
[all...]
mthca_dev.h
535 struct mthca_cq *send_cq,
544 struct mthca_cq *send_cq,
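
The mthca hits include the lock-both-CQs idiom that the mlx4 and mlx5 drivers below repeat: if send_cq and recv_cq are the same object, take the lock once; otherwise always acquire the two locks in a fixed order so concurrent QP create/destroy paths cannot deadlock. A self-contained sketch of that idiom (the example_cq structure and ordering by CQ number are illustrative assumptions):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_cq {
	spinlock_t lock;
	u32 cqn;
};

static void example_lock_cqs(struct example_cq *send_cq,
			     struct example_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void example_unlock_cqs(struct example_cq *send_cq,
			       struct example_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
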
/linux/drivers/infiniband/hw/mlx5/
qp.c
88 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
827 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
829 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
1297 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq) in get_sq_ts_format() argument
1301 return get_ts_format(dev, send_cq, fr_supported(ts_cap), in get_sq_ts_format()
1305 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq, in get_qp_ts_format() argument
1314 send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) : in get_qp_ts_format()
1591 to_mcq(init_attr->send_cq)); in create_raw_packet_qp()
1932 scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq); in configure_requester_scat_cqe()
2082 struct mlx5_ib_cq *send_cq; create_dci() local
2243 struct mlx5_ib_cq *send_cq; create_user_qp() local
2444 struct mlx5_ib_cq *send_cq; create_kernel_qp() local
2567 mlx5_ib_lock_cqs(struct mlx5_ib_cq * send_cq,struct mlx5_ib_cq * recv_cq) mlx5_ib_lock_cqs() argument
2597 mlx5_ib_unlock_cqs(struct mlx5_ib_cq * send_cq,struct mlx5_ib_cq * recv_cq) mlx5_ib_unlock_cqs() argument
2627 get_cqs(enum ib_qp_type qp_type,struct ib_cq * ib_send_cq,struct ib_cq * ib_recv_cq,struct mlx5_ib_cq ** send_cq,struct mlx5_ib_cq ** recv_cq) get_cqs() argument
2663 struct mlx5_ib_cq *send_cq, *recv_cq; destroy_qp_common() local
4169 struct mlx5_ib_cq *send_cq, *recv_cq; __mlx5_ib_modify_qp() local
[all...]
gsi.c
50 struct ib_cq *gsi_cq = mqp->ibqp.send_cq; in generate_completions()
141 hw_init_attr.send_cq = gsi->cq; in mlx5_ib_create_gsi()
205 .send_cq = gsi->cq, in create_gsi_ud_qp()
/linux/include/rdma/
rdmavt_qp.h
806 struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq); in rvt_send_cq()
916 * @send_cq - The cq for send
921 static inline u32 ib_cq_tail(struct ib_cq *send_cq) in ib_cq_tail() argument
923 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_tail()
925 return ibcq_to_rvtcq(send_cq)->ip ? in ib_cq_tail()
927 ibcq_to_rvtcq(send_cq)->kqueue->tail; in ib_cq_tail()
932 * @send_cq - The cq for send
937 static inline u32 ib_cq_head(struct ib_cq *send_cq) in ib_cq_head() argument
939 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq); in ib_cq_head()
941 return ibcq_to_rvtcq(send_cq) in ib_cq_head()
[all...]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
send.h
120 struct mlx5hws_send_ring_cq send_cq; member
228 struct mlx5hws_send_ring_cq *send_cq = &queue->send_ring.send_cq; in mlx5hws_send_engine_empty() local
230 return ((send_sq->cur_post & send_sq->buf_mask) == send_cq->poll_wqe); in mlx5hws_send_engine_empty()
send.c
603 struct mlx5hws_send_ring_cq *cq = &send_ring->send_cq; in hws_send_engine_poll_cq()
998 hws_send_ring_close_cq(&queue->send_ring.send_cq); in hws_send_ring_close()
1008 err = hws_send_ring_open_cq(ctx->mdev, queue, numa_node, &ring->send_cq); in mlx5hws_send_ring_open()
1013 &ring->send_cq); in mlx5hws_send_ring_open()
1020 hws_send_ring_close_cq(&ring->send_cq); in mlx5hws_send_ring_open()
/linux/drivers/infiniband/hw/mlx4/
qp.c
52 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
54 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
762 if (init_attr->send_cq || init_attr->cap.max_send_wr) { in _mlx4_ib_create_qp_rss()
964 mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), in create_rq()
973 mcq = to_mcq(init_attr->send_cq); in create_rq()
977 mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), in create_rq()
1242 mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
1251 mcq = to_mcq(init_attr->send_cq); in create_qp_common()
1255 mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
1310 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struc argument
1325 mlx4_ib_unlock_cqs(struct mlx4_ib_cq * send_cq,struct mlx4_ib_cq * recv_cq) mlx4_ib_unlock_cqs() argument
1359 get_cqs(struct mlx4_ib_qp * qp,enum mlx4_ib_source_type src,struct mlx4_ib_cq ** send_cq,struct mlx4_ib_cq ** recv_cq) get_cqs() argument
1412 struct mlx4_ib_cq *send_cq, *recv_cq; destroy_qp_common() local
2129 struct mlx4_ib_cq *send_cq, *recv_cq; __mlx4_ib_modify_qp() local
[all...]
/linux/Documentation/translations/zh_CN/infiniband/
tag_matching.rst
/linux/net/sunrpc/xprtrdma/
verbs.c
350 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
351 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
352 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
421 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
424 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
425 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
426 ep->re_attr.send_cq = NULL; in rpcrdma_ep_create()
624 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
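
The xprtrdma hits show the allocate/teardown pairing for a send CQ stored directly in the ib_qp_init_attr: allocate with ib_alloc_cq_any(), reset the pointer to NULL on failure so a later teardown is harmless, and release with ib_free_cq(). A minimal sketch under those assumptions (function names, IB_POLL_WORKQUEUE, and the surrounding parameters are illustrative, not the rpcrdma code verbatim):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_ep_create_send_cq(struct ib_device *device, void *ctx,
				     int nr_cqe, struct ib_qp_init_attr *attr)
{
	attr->send_cq = ib_alloc_cq_any(device, ctx, nr_cqe,
					IB_POLL_WORKQUEUE);
	if (IS_ERR(attr->send_cq)) {
		int rc = PTR_ERR(attr->send_cq);

		attr->send_cq = NULL;
		return rc;
	}
	return 0;
}

static void example_ep_destroy_send_cq(struct ib_qp_init_attr *attr)
{
	if (attr->send_cq)
		ib_free_cq(attr->send_cq);
	attr->send_cq = NULL;
}
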
/linux/fs/smb/common/smbdirect/
smbdirect_socket.h
30 struct ib_cq *send_cq; member
/linux/fs/smb/server/
transport_rdma.c
105 struct ib_cq *send_cq; member
427 if (t->send_cq) in free_transport()
428 ib_free_cq(t->send_cq); in free_transport()
1864 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1867 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1869 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1870 t->send_cq = NULL; in smb_direct_create_qpair()
1889 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1924 if (t->send_cq) { in smb_direct_create_qpair()
[all...]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_qp.c
58 static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq, in get_cqs() argument
61 *send_cq = to_vcq(qp->ibqp.send_cq); in get_cqs()
359 cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle; in pvrdma_create_qp()
1034 init_attr->send_cq = qp->ibqp.send_cq; in pvrdma_query_qp()
/linux/drivers/infiniband/ulp/srp/
ib_srp.h
157 struct ib_cq *send_cq; member
ib_srp.c
516 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
529 struct ib_cq *recv_cq, *send_cq; in srp_create_ch_ib() local
547 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
549 if (IS_ERR(send_cq)) { in srp_create_ch_ib()
550 ret = PTR_ERR(send_cq); in srp_create_ch_ib()
561 init_attr->send_cq = send_cq; in srp_create_ch_ib()
599 if (ch->send_cq) in srp_create_ch_ib()
600 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
604 ch->send_cq in srp_create_ch_ib()
[all...]
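
One teardown detail visible in the ib_srp.c hits: the send CQ there is direct-polled, so any send completions still queued are reaped explicitly with ib_process_cq_direct() before the CQ is released. A minimal sketch of just that step (the function name is made up, and the real driver interleaves this with QP drain and destroy):

#include <rdma/ib_verbs.h>

static void example_reap_and_free_send_cq(struct ib_cq *send_cq)
{
	/* reap completions still queued on a direct-polled (IB_POLL_DIRECT) CQ */
	ib_process_cq_direct(send_cq, -1);
	ib_free_cq(send_cq);
}
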
/linux/fs/smb/client/
smbdirect.c
1377 ib_free_cq(sc->ib.send_cq); in smbd_destroy()
1582 sc->ib.send_cq = in _smbd_get_connection()
1585 if (IS_ERR(sc->ib.send_cq)) { in _smbd_get_connection()
1586 sc->ib.send_cq = NULL; in _smbd_get_connection()
1608 qp_attr.send_cq = sc->ib.send_cq; in _smbd_get_connection()
1724 if (sc->ib.send_cq) in _smbd_get_connection()
1725 ib_free_cq(sc->ib.send_cq); in _smbd_get_connection()
/linux/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
716 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v2_post_send()
2717 free_mr->rsv_qp[i]->ibqp.send_cq = cq; in free_mr_init_qp()
2775 qp_init_attr.send_cq = cq; in free_mr_alloc_res()
4350 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_reset_to_init()
4378 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_init_to_init()
5298 if (ibqp->send_cq) in clear_qp()
5299 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), in clear_qp()
5302 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq) in clear_qp()
5610 qp_init_attr->send_cq = ibqp->send_cq; in hns_roce_v2_query_qp()
5634 struct hns_roce_cq *send_cq, *recv_cq; hns_roce_v2_destroy_qp_common() local
[all...]
/linux/drivers/net/ethernet/ibm/ehea/
ehea.h
351 struct ehea_cq *send_cq; member
/linux/Documentation/infiniband/
tag_matching.rst
32 processed by the sender. A completion send is received in the send_cq
