Lines Matching defs:lnk

47 static int smc_ib_modify_qp_init(struct smc_link *lnk)
54 qp_attr.port_num = lnk->ibport;
57 return ib_modify_qp(lnk->roce_qp, &qp_attr,
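The matches at 47-57 are the first of three QP state transitions. A minimal sketch of that RESET->INIT step, assuming the struct smc_link fields seen above and the <rdma/ib_verbs.h> context of smc_ib.c (the function name and pkey index are illustrative, not the exact smc_ib.c body):

static int example_qp_to_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr = {
		.qp_state = IB_QPS_INIT,
		.pkey_index = 0,
		.port_num = lnk->ibport,
		/* SMC-R peers write into each other's buffers, so allow remote write */
		.qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}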
62 static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
72 qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
74 rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
75 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
77 rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
78 rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
79 if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
80 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
81 sizeof(lnk->lgr->nexthop_mac));
83 memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
84 sizeof(lnk->peer_mac));
85 qp_attr.dest_qp_num = lnk->peer_qpn;
86 qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
92 return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
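Lines 62-92 build the INIT->RTR transition: the address vector (port, GID, destination MAC), the negotiated path MTU, the peer's QP number and its starting receive PSN. A hedged sketch of that pattern, with the SMC_V2 gateway branch reduced to comments and the rd_atomic/RNR values purely illustrative:

static int example_qp_to_rtr(struct smc_link *lnk)
{
	int qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
			   IB_QP_DEST_QPN | IB_QP_RQ_PSN |
			   IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr = {};
	u8 hop_lim = 1;	/* raised to an IP hop limit when a gateway is in the path */

	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	/* the routed SMC_V2 case uses lnk->lgr->nexthop_mac instead of peer_mac */
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac, sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn;		/* peer's starting send PSN */
	qp_attr.max_dest_rd_atomic = 1;		/* illustrative */
	qp_attr.min_rnr_timer = 5;		/* illustrative */

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}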
95 int smc_ib_modify_qp_rts(struct smc_link *lnk)
104 qp_attr.sq_psn = lnk->psn_initial; /* starting send packet seq # */
108 return ib_modify_qp(lnk->roce_qp, &qp_attr,
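Lines 95-108 are the RTR->RTS step: from here on the QP may also transmit, starting with the locally chosen PSN. A sketch with illustrative timeout/retry settings:

static int example_qp_to_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr = {
		.qp_state = IB_QPS_RTS,
		.timeout = 15,			/* ACK timeout, 4.096 us * 2^15 */
		.retry_cnt = 7,			/* transport retries */
		.rnr_retry = 7,			/* RNR NAK retries */
		.max_rd_atomic = 1,
		.sq_psn = lnk->psn_initial,	/* our starting send PSN */
	};

	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
			    IB_QP_MAX_QP_RD_ATOMIC);
}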
114 int smc_ib_modify_qp_error(struct smc_link *lnk)
120 return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
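Lines 114-120: link teardown goes through the error state, which flushes all outstanding work requests; only IB_QP_STATE is needed in the mask. A one-liner sketch of the same pattern:

static int example_qp_to_err(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_ERR };

	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}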
123 int smc_ib_ready_link(struct smc_link *lnk)
125 struct smc_link_group *lgr = smc_get_lgr(lnk);
128 rc = smc_ib_modify_qp_init(lnk);
132 rc = smc_ib_modify_qp_rtr(lnk);
135 smc_wr_remember_qp_attr(lnk);
136 rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
140 rc = smc_wr_rx_post_init(lnk);
143 smc_wr_remember_qp_attr(lnk);
146 rc = smc_ib_modify_qp_rts(lnk);
149 smc_wr_remember_qp_attr(lnk);
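The smc_ib_ready_link() matches (123-149) show the order in which the transitions above are driven: INIT, then RTR, then arming the receive CQ and pre-posting receive work requests, and finally RTS. A sketch of that sequence, with error handling collapsed and the role-dependent gating of the RTS step (the original consults the link group for this) left out:

static int example_ready_link(struct smc_link *lnk)
{
	int rc;

	rc = smc_ib_modify_qp_init(lnk);	/* RESET -> INIT */
	if (rc)
		return rc;
	rc = smc_ib_modify_qp_rtr(lnk);		/* INIT -> RTR */
	if (rc)
		return rc;
	smc_wr_remember_qp_attr(lnk);
	/* arm the recv CQ and post receive buffers before anything can arrive */
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv, IB_CQ_SOLICITED_MASK);
	if (rc)
		return rc;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		return rc;
	rc = smc_ib_modify_qp_rts(lnk);		/* RTR -> RTS */
	if (!rc)
		smc_wr_remember_qp_attr(lnk);
	return rc;
}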
346 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
347 lgr->lnk[i].smcibdev != smcibdev)
349 if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
451 void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
453 if (lnk->roce_pd)
454 ib_dealloc_pd(lnk->roce_pd);
455 lnk->roce_pd = NULL;
458 int smc_ib_create_protection_domain(struct smc_link *lnk)
462 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
463 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
464 if (IS_ERR(lnk->roce_pd))
465 lnk->roce_pd = NULL;
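smc_ib_create_protection_domain()/smc_ib_dealloc_protection_domain() (451-465) follow the usual ERR_PTR convention of ib_alloc_pd(). A sketch of that allocate/free pair, assuming only the <rdma/ib_verbs.h> / <linux/err.h> context and the lnk fields seen above:

static int example_create_pd(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);	/* 0 on success, -errno otherwise */
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;		/* keep teardown checks simple */
	return rc;
}

static void example_dealloc_pd(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}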
481 if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
482 lgr->lnk[i].smcibdev != smcibdev)
636 struct smc_link *lnk = (struct smc_link *)priv;
637 struct smc_ib_device *smcibdev = lnk->smcibdev;
655 void smc_ib_destroy_queue_pair(struct smc_link *lnk)
657 if (lnk->roce_qp)
658 ib_destroy_qp(lnk->roce_qp);
659 lnk->roce_qp = NULL;
663 int smc_ib_create_queue_pair(struct smc_link *lnk)
667 .qp_context = lnk,
668 .send_cq = lnk->smcibdev->roce_cq_send,
669 .recv_cq = lnk->smcibdev->roce_cq_recv,
678 .max_recv_sge = lnk->wr_rx_sge_cnt,
686 lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
687 rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
688 if (IS_ERR(lnk->roce_qp))
689 lnk->roce_qp = NULL;
691 smc_wr_remember_qp_attr(lnk);
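Lines 655-691 pair ib_destroy_qp() with ib_create_qp(): a reliable-connected QP is created on the link's PD, its context pointer set to the link, and the device's send and receive CQs attached. A sketch of the creation side; the queue depths and send SGE count here are illustrative only (SMC derives the real values from its work-request layer, e.g. lnk->wr_rx_sge_cnt for max_recv_sge):

static int example_create_qp(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.qp_context = lnk,	/* handed back to the QP event handler */
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
			.max_send_wr = 16,			/* illustrative */
			.max_recv_wr = 16,			/* illustrative */
			.max_send_sge = 2,			/* illustrative */
			.max_recv_sge = lnk->wr_rx_sge_cnt,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}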
738 bool smc_ib_is_sg_need_sync(struct smc_link *lnk,
746 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
747 buf_slot->sgt[lnk->link_idx].nents, i) {
750 if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
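smc_ib_is_sg_need_sync() (738-750) probes, right after mapping, whether the buffer's DMA addresses actually require explicit sync calls (swiotlb bounce buffering, non-coherent devices). A sketch of that walk, assuming buf_slot is SMC's buffer descriptor (struct smc_buf_desc) with one scatterlist table per link:

static bool example_sg_need_sync(struct smc_link *lnk,
				 struct smc_buf_desc *buf_slot)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;			/* end of mapped entries */
		if (dma_need_sync(lnk->smcibdev->ibdev->dma_device,
				  sg_dma_address(sg)))
			return true;		/* caller records the per-link bit */
	}
	return false;
}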
762 void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
769 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
773 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
774 buf_slot->sgt[lnk->link_idx].nents, i) {
777 ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
785 void smc_ib_sync_sg_for_device(struct smc_link *lnk,
792 if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
796 for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
797 buf_slot->sgt[lnk->link_idx].nents, i) {
800 ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
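The two sync helpers (762-800) are symmetric: both bail out early via the per-link bit in buf_slot->is_dma_need_sync and otherwise walk the mapped scatterlist, handing each DMA address to the ib_dma_sync_single_for_*() wrappers. A sketch of the CPU-side variant; the device-side one only swaps the final call:

static void example_sync_sg_for_cpu(struct smc_link *lnk,
				    struct smc_buf_desc *buf_slot,
				    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* nothing to do for coherent mappings */
	if (!(buf_slot->is_dma_need_sync & (1U << lnk->link_idx)))
		return;

	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}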
808 int smc_ib_buf_map_sg(struct smc_link *lnk,
814 mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
815 buf_slot->sgt[lnk->link_idx].sgl,
816 buf_slot->sgt[lnk->link_idx].orig_nents,
824 void smc_ib_buf_unmap_sg(struct smc_link *lnk,
828 if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
831 ib_dma_unmap_sg(lnk->smcibdev->ibdev,
832 buf_slot->sgt[lnk->link_idx].sgl,
833 buf_slot->sgt[lnk->link_idx].orig_nents,
835 buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
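Finally, smc_ib_buf_map_sg()/smc_ib_buf_unmap_sg() (808-835) wrap ib_dma_map_sg()/ib_dma_unmap_sg() for the link's scatterlist table; the first entry's dma_address doubles as a "mapped" flag so a second unmap is harmless. A sketch of the pair, again assuming struct smc_buf_desc from smc_core.h:

static int example_buf_map_sg(struct smc_link *lnk,
			      struct smc_buf_desc *buf_slot,
			      enum dma_data_direction data_direction)
{
	/* returns the number of mapped entries, 0 on failure */
	return ib_dma_map_sg(lnk->smcibdev->ibdev,
			     buf_slot->sgt[lnk->link_idx].sgl,
			     buf_slot->sgt[lnk->link_idx].orig_nents,
			     data_direction);
}

static void example_buf_unmap_sg(struct smc_link *lnk,
				 struct smc_buf_desc *buf_slot,
				 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return;				/* not mapped (or already unmapped) */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}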