Lines matching defs:lnk (definitions and assignments of the struct smc_link pointer lnk; each match is shown as its source line number followed by the code)
71 static void smc_ibdev_cnt_inc(struct smc_link *lnk)
73 atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
76 static void smc_ibdev_cnt_dec(struct smc_link *lnk)
78 atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
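The two helpers above maintain per-port link counters on the owning RoCE device; note that ibport counts from 1, hence the "- 1" when indexing the array. A minimal model of the pattern (reduced stand-in structs, not the kernel's full definitions; SMC_MAX_PORTS assumed to bound ibport as in smc_ib.h):

#include <linux/atomic.h>
#include <linux/types.h>

#define SMC_MAX_PORTS   2       /* assumption: as in smc_ib.h */

struct ibdev_model {                    /* stand-in for struct smc_ib_device */
        atomic_t lnk_cnt_by_port[SMC_MAX_PORTS];
};

struct link_model {                     /* stand-in for struct smc_link */
        struct ibdev_model *smcibdev;
        u8 ibport;                      /* 1-based IB port number */
};

static void model_ibdev_cnt_inc(struct link_model *lnk)
{
        /* the port number is 1-based, the counter array 0-based */
        atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}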
130 conn->lnk = NULL; /* reset conn->lnk first */
132 struct smc_link *lnk = &conn->lgr->lnk[i];
134 if (lnk->state != expected || lnk->link_is_asym)
137 conn->lnk = lnk; /* temporary, SMC server assigns link */
144 lnk2 = &conn->lgr->lnk[j];
147 conn->lnk = lnk2;
152 if (!conn->lnk)
153 conn->lnk = lnk;
156 if (!conn->lnk)
158 atomic_inc(&conn->lnk->conn_cnt);
202 if (conn->lnk)
203 atomic_dec(&conn->lnk->conn_cnt);
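Lines 130-158 are the link-balancing step of connection setup: conn->lnk is reset, links in the wrong state or marked asymmetric are skipped, an SMC client simply takes the first candidate (the comment notes the server assigns the final link), and on the server the inner j-loop visible at lines 144-147 steers every second connection (the kernel keys this off the group's connection count) to a later usable link before falling back to the first; conn_cnt then counts the connections sitting on the chosen link. A reduced model of that alternating pick (model types only):

#include <linux/atomic.h>
#include <linux/types.h>

#define LINKS_MAX       3       /* assumption: SMC_LINKS_PER_LGR_MAX */

struct lnk_model {
        atomic_t conn_cnt;      /* connections currently on this link */
        bool usable;            /* right state and not asymmetric */
};

/* every second connection (odd conns_num) goes to a later usable
 * link, so two active links end up sharing the load roughly evenly
 */
static struct lnk_model *assign_link_shape(struct lnk_model links[],
                                           unsigned int conns_num)
{
        struct lnk_model *first = NULL;
        int i;

        for (i = 0; i < LINKS_MAX; i++) {
                if (!links[i].usable)
                        continue;
                if (!first) {
                        first = &links[i];
                        if (!(conns_num % 2))
                                break;  /* even count: first link wins */
                        continue;       /* odd count: prefer a later link */
                }
                first = &links[i];
                break;
        }
        if (first)
                atomic_inc(&first->conn_cnt);
        return first;
}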
498 if (!smc_link_usable(&lgr->lnk[i]))
500 if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
703 struct smc_link *lnk = &lgr->lnk[i];
705 if (smc_link_sendable(lnk))
706 lnk->state = SMC_LNK_INACTIVE;
770 if (smc_link_usable(&lgr->lnk[i]) &&
771 lgr->lnk[i].link_id == link_id)
788 int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
796 lnk->smcibdev = ini->smcrv2.ib_dev_v2;
797 lnk->ibport = ini->smcrv2.ib_port_v2;
798 lnk->wr_rx_sge_cnt = lnk->smcibdev->ibdev->attrs.max_recv_sge < 2 ? 1 : 2;
799 lnk->wr_rx_buflen = smc_link_shared_v2_rxbuf(lnk) ?
802 lnk->smcibdev = ini->ib_dev;
803 lnk->ibport = ini->ib_port;
804 lnk->wr_rx_sge_cnt = 1;
805 lnk->wr_rx_buflen = SMC_WR_BUF_SIZE;
807 get_device(&lnk->smcibdev->ibdev->dev);
808 atomic_inc(&lnk->smcibdev->lnk_cnt);
809 refcount_set(&lnk->refcnt, 1); /* link refcnt is set to 1 */
810 lnk->clearing = 0;
811 lnk->path_mtu = lnk->smcibdev->pattr[lnk->ibport - 1].active_mtu;
812 lnk->link_id = smcr_next_link_id(lgr);
813 lnk->lgr = lgr;
815 lnk->link_idx = link_idx;
816 lnk->wr_rx_id_compl = 0;
817 smc_ibdev_cnt_inc(lnk);
818 smcr_copy_dev_info_to_link(lnk);
819 atomic_set(&lnk->conn_cnt, 0);
820 smc_llc_link_set_uid(lnk);
821 INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
822 if (!lnk->smcibdev->initialized) {
823 rc = (int)smc_ib_setup_per_ibdev(lnk->smcibdev);
828 lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
830 rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
831 ini->vlan_id, lnk->gid, &lnk->sgid_index,
836 rc = smc_llc_link_init(lnk);
839 rc = smc_wr_alloc_link_mem(lnk);
842 rc = smc_ib_create_protection_domain(lnk);
845 rc = smc_ib_create_queue_pair(lnk);
848 rc = smc_wr_create_link(lnk);
851 lnk->state = SMC_LNK_ACTIVATING;
855 smc_ib_destroy_queue_pair(lnk);
857 smc_ib_dealloc_protection_domain(lnk);
859 smc_wr_free_link_mem(lnk);
861 smc_llc_link_clear(lnk, false);
863 smc_ibdev_cnt_dec(lnk);
864 put_device(&lnk->smcibdev->ibdev->dev);
865 smcibdev = lnk->smcibdev;
866 memset(lnk, 0, sizeof(struct smc_link));
867 lnk->state = SMC_LNK_UNUSED;
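smcr_link_init() (lines 788-867) is a classic goto-unwind ladder: per-link state is filled in, then LLC state, work-request memory, protection domain, queue pair and WR resources are set up in order, and every failure label tears down exactly what was already built, finishing with the link memset back to SMC_LNK_UNUSED. The shape of the ladder, with placeholder step names rather than the kernel's functions:

/* placeholder steps; each setup returns 0 or a negative errno */
static int step_alloc_wr_mem(void)  { return 0; }
static int step_create_pd(void)     { return 0; }
static int step_create_qp(void)     { return 0; }
static void undo_create_pd(void)    { }
static void undo_alloc_wr_mem(void) { }

static int link_init_shape(void)
{
        int rc;

        rc = step_alloc_wr_mem();
        if (rc)
                goto out;
        rc = step_create_pd();
        if (rc)
                goto free_wr_mem;
        rc = step_create_qp();
        if (rc)
                goto dealloc_pd;
        return 0;               /* success: link becomes ACTIVATING */

dealloc_pd:
        undo_create_pd();
free_wr_mem:
        undo_alloc_wr_mem();
out:
        return rc;              /* failure: link ends up UNUSED */
}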
880 struct smc_link *lnk;
970 lnk = &lgr->lnk[link_idx];
971 rc = smcr_link_init(lgr, lnk, link_idx, ini);
976 lgr->net = smc_ib_net(lnk->smcibdev);
1063 smc_wr_tx_put_slot(conn->lnk,
1072 atomic_dec(&conn->lnk->conn_cnt);
1074 smcr_link_put(conn->lnk);
1075 conn->lnk = to_lnk;
1076 atomic_inc(&conn->lnk->conn_cnt);
1078 smcr_link_hold(conn->lnk);
1096 if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
1098 if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
1099 from_lnk->ibport == lgr->lnk[i].ibport) {
1102 to_lnk = &lgr->lnk[i];
1113 if (conn->lnk != from_lnk)
1116 /* conn->lnk not yet set in SMC_INIT state */
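Lines 1063-1116 belong to connection failover: smc_switch_conns() looks for the first active link other than the failing one, and on a device error additionally rejects links sharing the failing link's smcibdev/ibport pair; each affected connection then releases its pending send slot and moves its conn_cnt and link reference from the old link to the new one. The reference choreography in reduced form (model types; the real code calls smcr_link_hold()/smcr_link_put(), and the put may free the link):

#include <linux/atomic.h>
#include <linux/refcount.h>

struct link_model2 {
        atomic_t conn_cnt;      /* connections riding on this link */
        refcount_t refcnt;      /* link lifetime */
};

struct conn_model {
        struct link_model2 *lnk;
};

static void switch_link_shape(struct conn_model *conn,
                              struct link_model2 *to_lnk)
{
        atomic_dec(&conn->lnk->conn_cnt);
        /* the original calls smcr_link_put() here, which can free;
         * in this model the group always holds another reference
         */
        refcount_dec(&conn->lnk->refcnt);
        conn->lnk = to_lnk;
        atomic_inc(&conn->lnk->conn_cnt);
        refcount_inc(&conn->lnk->refcnt);       /* smcr_link_hold() */
}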
1276 smcr_link_put(conn->lnk); /* link_hold in smc_conn_create() */
1282 struct smc_link *lnk)
1285 buf_desc->is_reg_mr[lnk->link_idx] = false;
1286 if (!buf_desc->is_map_ib[lnk->link_idx])
1290 buf_desc->mr[lnk->link_idx]) {
1291 smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]);
1292 buf_desc->mr[lnk->link_idx] = NULL;
1295 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
1297 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
1299 sg_free_table(&buf_desc->sgt[lnk->link_idx]);
1300 buf_desc->is_map_ib[lnk->link_idx] = false;
1304 static void smcr_buf_unmap_lgr(struct smc_link *lnk)
1306 struct smc_link_group *lgr = lnk->lgr;
1313 smcr_buf_unmap_link(buf_desc, true, lnk);
1319 smcr_buf_unmap_link(buf_desc, false, lnk);
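smcr_buf_unmap_link() (lines 1282-1300) releases a buffer's per-link RDMA state in reverse order of setup: the memory region first, then the DMA mapping (RMBs were mapped DMA_FROM_DEVICE, send buffers DMA_TO_DEVICE), then the scatterlist table; the is_reg_mr[]/is_map_ib[] flags, indexed by link_idx, make the teardown safe to call for links the buffer was never mapped on. The flag-guarded shape, with placeholder helpers:

#include <linux/types.h>

static void put_mr_stub(void)   { }     /* smc_ib_put_memory_region() stand-in */
static void unmap_sg_stub(void) { }     /* smc_ib_buf_unmap_sg() stand-in */
static void free_sgt_stub(void) { }     /* sg_free_table() stand-in */

struct buf_model {      /* one link's slice of a buf_desc */
        bool is_reg_mr;
        bool is_map_ib;
};

static void buf_unmap_shape(struct buf_model *b)
{
        b->is_reg_mr = false;   /* MR is invalid from here on */
        if (!b->is_map_ib)
                return;         /* never mapped on this link */
        put_mr_stub();
        unmap_sg_stub();
        free_sgt_stub();
        b->is_map_ib = false;
}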
1324 static void smcr_rtoken_clear_link(struct smc_link *lnk)
1326 struct smc_link_group *lgr = lnk->lgr;
1330 lgr->rtokens[i][lnk->link_idx].rkey = 0;
1331 lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
1335 static void __smcr_link_clear(struct smc_link *lnk)
1337 struct smc_link_group *lgr = lnk->lgr;
1340 smc_wr_free_link_mem(lnk);
1341 smc_ibdev_cnt_dec(lnk);
1342 put_device(&lnk->smcibdev->ibdev->dev);
1343 smcibdev = lnk->smcibdev;
1344 memset(lnk, 0, sizeof(struct smc_link));
1345 lnk->state = SMC_LNK_UNUSED;
1352 void smcr_link_clear(struct smc_link *lnk, bool log)
1354 if (!lnk->lgr || lnk->clearing ||
1355 lnk->state == SMC_LNK_UNUSED)
1357 lnk->clearing = 1;
1358 lnk->peer_qpn = 0;
1359 smc_llc_link_clear(lnk, log);
1360 smcr_buf_unmap_lgr(lnk);
1361 smcr_rtoken_clear_link(lnk);
1362 smc_ib_modify_qp_error(lnk);
1363 smc_wr_free_link(lnk);
1364 smc_ib_destroy_queue_pair(lnk);
1365 smc_ib_dealloc_protection_domain(lnk);
1366 smcr_link_put(lnk); /* theoretically last link_put */
1369 void smcr_link_hold(struct smc_link *lnk)
1371 refcount_inc(&lnk->refcnt);
1374 void smcr_link_put(struct smc_link *lnk)
1376 if (refcount_dec_and_test(&lnk->refcnt))
1377 __smcr_link_clear(lnk);
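Lines 1335-1377 show the link's two-stage teardown: smcr_link_clear() runs the parts that must happen exactly once (guarded by lnk->clearing) and then drops what is normally the last reference, while __smcr_link_clear() only runs when refcount_dec_and_test() in smcr_link_put() actually hits zero; the initial reference comes from refcount_set(..., 1) at line 809. The generic shape:

#include <linux/refcount.h>

struct obj_model {
        refcount_t refcnt;
        int clearing;
};

static void obj_final_free(struct obj_model *o)
{
        /* zero refs: free memory, drop device references, etc. */
}

static void obj_put(struct obj_model *o)
{
        if (refcount_dec_and_test(&o->refcnt))
                obj_final_free(o);
}

static void obj_clear(struct obj_model *o)
{
        if (o->clearing)
                return;         /* the once-only part must not rerun */
        o->clearing = 1;
        /* ... quiesce users, destroy QP/PD, as smcr_link_clear() does ... */
        obj_put(o);             /* normally the last reference */
}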
1386 smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
1467 if (lgr->lnk[i].state != SMC_LNK_UNUSED)
1468 smcr_link_clear(&lgr->lnk[i], false);
1663 if (lgr->lnk[i].smcibdev == smcibdev)
1664 smcr_link_down_cond_sched(&lgr->lnk[i]);
1693 if (smc_link_usable(&lgr->lnk[i]))
1694 lgr->lnk[i].link_is_asym = false;
1726 lgr->lnk[asym_lnk_idx].link_is_asym = true;
1772 static void smcr_link_down(struct smc_link *lnk)
1774 struct smc_link_group *lgr = lnk->lgr;
1778 if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1781 to_lnk = smc_switch_conns(lgr, lnk, true);
1783 smcr_link_clear(lnk, true);
1787 del_link_id = lnk->link_id;
1806 smcr_link_clear(lnk, true);
1813 void smcr_link_down_cond(struct smc_link *lnk)
1815 if (smc_link_downing(&lnk->state)) {
1816 trace_smcr_link_down(lnk, __builtin_return_address(0));
1817 smcr_link_down(lnk);
1822 void smcr_link_down_cond_sched(struct smc_link *lnk)
1824 if (smc_link_downing(&lnk->state)) {
1825 trace_smcr_link_down(lnk, __builtin_return_address(0));
1826 smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
1827 if (!schedule_work(&lnk->link_down_wrk))
1828 smcr_link_put(lnk);
1844 struct smc_link *lnk = &lgr->lnk[i];
1846 if (smc_link_usable(lnk) &&
1847 lnk->smcibdev == smcibdev && lnk->ibport == ibport)
1848 smcr_link_down_cond_sched(lnk);
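smcr_link_down_cond_sched() (lines 1822-1828) is the canonical hand-off of a refcounted object to deferred work: take a reference before schedule_work(), and drop it at once if schedule_work() returns false because the work was already pending (the queued instance owns its own reference, released by the work function). A self-contained sketch of the same pattern:

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct worker_obj {
        refcount_t refcnt;
        struct work_struct work;
};

static void wobj_put(struct worker_obj *o)
{
        if (refcount_dec_and_test(&o->refcnt))
                kfree(o);
}

static void wobj_work_fn(struct work_struct *work)
{
        struct worker_obj *o = container_of(work, struct worker_obj, work);

        /* ... handle the down event ... */
        wobj_put(o);    /* reference taken in wobj_sched() */
}

static void wobj_sched(struct worker_obj *o)
{
        refcount_inc(&o->refcnt);       /* reference for the work item */
        if (!schedule_work(&o->work))
                wobj_put(o);    /* already queued: that instance has its own */
}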
1925 struct smc_link *lnk;
1933 lnk = &lgr->lnk[i];
1935 if (!smc_link_active(lnk))
1938 if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
1940 if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
1941 !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
1943 !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
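Lines 1925-1943 come from the link-group matching done when a new connection might reuse an existing group: there must be an active link whose RDMA device is reachable from the right network namespace, the peer GID must match, an SMC-Rv1 peer must also match on MAC, and on the client side the peer QP number must equal the one from the CLC handshake (a server accepts any QPN). The predicate as a boolean sketch (model fields; sizes assumed to be SMC_GID_SIZE = 16 and ETH_ALEN = 6):

#include <linux/string.h>
#include <linux/types.h>

#define GID_SIZE        16      /* assumption: SMC_GID_SIZE */
#define MAC_LEN         6       /* ETH_ALEN */

struct peer_model {
        u8 gid[GID_SIZE];
        u8 mac[MAC_LEN];
        u32 qpn;
};

static bool lgr_match_shape(const struct peer_model *have,
                            const struct peer_model *want,
                            bool is_server, bool smcr_v1)
{
        if (!is_server && have->qpn != want->qpn)
                return false;   /* client must reach the same QP */
        if (memcmp(have->gid, want->gid, GID_SIZE))
                return false;
        if (smcr_v1 && memcmp(have->mac, want->mac, MAC_LEN))
                return false;
        return true;
}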
2042 smcr_link_hold(conn->lnk); /* link_put in smc_conn_free() */
2130 struct smc_link *lnk)
2136 if (buf_desc->is_map_ib[lnk->link_idx])
2148 rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL);
2154 for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) {
2163 sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
2168 rc = smc_ib_buf_map_sg(lnk, buf_desc,
2177 smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx;
2185 rc = smc_ib_get_memory_region(lnk->roce_pd, access_flags,
2186 buf_desc, lnk->link_idx);
2189 smc_ib_sync_sg_for_device(lnk, buf_desc,
2192 buf_desc->is_map_ib[lnk->link_idx] = true;
2196 smc_ib_buf_unmap_sg(lnk, buf_desc,
2199 sg_free_table(&buf_desc->sgt[lnk->link_idx]);
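smcr_buf_map_link() (lines 2130-2199) builds the per-link DMA view of a buffer: allocate a scatter/gather table, point its entries at the buffer (a single sg_set_buf() suffices for a physically contiguous buffer, see line 2163), DMA-map it, and for an RMB obtain a memory region, unwinding in reverse on any failure. A minimal standalone use of the same scatterlist calls, assuming a kmalloc'ed (physically contiguous) buffer:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int map_one_buf_sketch(void *kbuf, unsigned int len)
{
        struct sg_table sgt;
        int rc;

        rc = sg_alloc_table(&sgt, 1, GFP_KERNEL);       /* one entry is enough */
        if (rc)
                return rc;
        sg_set_buf(sgt.sgl, kbuf, len); /* point the entry at the buffer */

        /* the real code now calls smc_ib_buf_map_sg() and, for RMBs,
         * smc_ib_get_memory_region(); each failure unwinds what was done
         */

        sg_free_table(&sgt);
        return 0;
}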
2224 static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
2234 rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
2244 int smcr_buf_map_lgr(struct smc_link *lnk)
2246 struct smc_link_group *lgr = lnk->lgr;
2250 rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
2254 rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
2265 int smcr_buf_reg_lgr(struct smc_link *lnk)
2267 struct smc_link_group *lgr = lnk->lgr;
2277 rc = smcr_link_reg_buf(lnk, buf_desc);
2295 rc = smcr_link_reg_buf(lnk, buf_desc);
2362 struct smc_link *lnk = &lgr->lnk[i];
2364 if (!smc_link_usable(lnk))
2366 if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
2508 !smc_link_active(conn->lnk))
2510 smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
2522 if (!smc_link_active(&conn->lgr->lnk[i]))
2524 smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
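Lines 2508-2524 pair the two DMA sync directions: before posting a send, CPU writes to the send buffer are pushed toward the device, and before the CPU reads freshly received data out of the RMB, ownership is pulled back; the RMB sync loops over all active links because any of them may have written the buffer. Schematically, using the generic DMA API rather than the smc_ib_sync_sg_* wrappers the SMC code goes through:

#include <linux/dma-mapping.h>

/* dev/sgt assumed already mapped with dma_map_sg() */
static void sync_shape(struct device *dev, struct sg_table *sgt)
{
        /* CPU filled the send buffer: hand it to the device */
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);

        /* the device (peer RDMA write) filled the RMB: hand it to the CPU */
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
}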
2644 if (lgr->lnk[link_idx].link_id == link_id) {
2656 int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
2658 struct smc_link_group *lgr = smc_get_lgr(lnk);
2664 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2665 lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
2674 lgr->rtokens[i][lnk->link_idx].rkey = rkey;
2675 lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
2680 int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
2682 struct smc_link_group *lgr = smc_get_lgr(lnk);
2687 if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
2702 struct smc_link *lnk,
2705 conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
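smc_rtoken_add()/smc_rtoken_delete() (lines 2656-2687) maintain the link group's table of remote keys, rtokens[rtoken_idx][link_idx], holding the peer's rkey and RMB DMA address per link; add first converts from wire byte order, returns the existing index if the (rkey, dma_addr) pair is already present, and otherwise claims a free slot. A reduced single-link lookup-or-insert over such a table (table size assumed to be SMC_RMBS_PER_LGR_MAX = 255):

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/types.h>

#define RTOKENS_MAX     255     /* assumption: SMC_RMBS_PER_LGR_MAX */

struct rtoken_model {
        u32 rkey;       /* 0 marks a free slot */
        u64 dma_addr;
};

/* return the index holding (rkey, dma_addr), inserting it if new */
static int rtoken_add_shape(struct rtoken_model tbl[], __be32 nw_rkey,
                            __be64 nw_vaddr)
{
        u64 dma_addr = be64_to_cpu(nw_vaddr);
        u32 rkey = be32_to_cpu(nw_rkey);
        int i, free_idx = -1;

        for (i = 0; i < RTOKENS_MAX; i++) {
                if (tbl[i].rkey == rkey && tbl[i].dma_addr == dma_addr)
                        return i;       /* already known */
                if (!tbl[i].rkey && free_idx < 0)
                        free_idx = i;
        }
        if (free_idx < 0)
                return -ENOSPC;
        tbl[free_idx].rkey = rkey;
        tbl[free_idx].dma_addr = dma_addr;
        return free_idx;
}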