/*
 * drivers/infiniband/core/cma.c -- excerpt.
 * Non-contiguous lines matched by a source search ("+full:mc +full:- +full:sid").
 * Function banners below mark which function each fragment belongs to;
 * code elided between fragments is omitted.
 */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 */
/* rdma_reject_msg() */
	if (rdma_ib_or_roce(id->device, id->port_num))
	if (rdma_protocol_iwarp(id->device, id->port_num))

/* rdma_is_consumer_reject - return true if the consumer rejected the connect */
	if (rdma_ib_or_roce(id->device, id->port_num))
	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

/* rdma_consumer_reject_data() */
	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
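/*
 * Editor's illustrative sketch (not from the original file, assumes
 * <rdma/rdma_cm.h>): how a kernel ULP's event handler might use
 * rdma_reject_msg() and rdma_consumer_reject_data() on a REJECTED event.
 * The handler name and pr_info() formatting are assumptions.
 */
static int example_reject_handler(struct rdma_cm_id *id,
				  struct rdma_cm_event *ev)
{
	if (ev->event == RDMA_CM_EVENT_REJECTED) {
		u8 data_len;
		const void *data;

		/* Human-readable reason for the transport in use. */
		pr_info("connect rejected: %s\n",
			rdma_reject_msg(id, ev->status));

		/* Private data is only valid for consumer rejects. */
		data = rdma_consumer_reject_data(id, ev, &data_len);
		if (data)
			pr_info("consumer reject carried %u bytes\n", data_len);
	}
	return 0;
}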
/* rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. */
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;

/* rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. */
	return &id_priv->id;
/* cma_pernet_xa() */
		return &pernet->tcp_ps;
		return &pernet->udp_ps;
		return &pernet->ipoib_ps;
		return &pernet->ib_ps;

/* cma_dev_get() / cma_dev_put() */
	refcount_inc(&cma_dev->refcount);
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);

/* cma_enum_devices_by_ibdev() */
		if (filter(cma_dev->device, cookie)) {

/* cma_get_default_gid_type() */
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];

/* cma_set_default_gid_type() */
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
		return -EINVAL;
	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =

/* cma_get_default_roce_tos() */
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];

/* cma_set_default_roce_tos() */
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =

/* cma_get_ib_dev() */
	return cma_dev->device;
/* cma_comp_exch() */
	lockdep_assert_held(&id_priv->handler_mutex);
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);

/* cma_get_ip_ver() / cma_set_ip_ver() */
	return hdr->ip_version >> 4;
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
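/*
 * Editor's worked example (not from the original file): cma_get_ip_ver()
 * and cma_set_ip_ver() above pack the IP version into the high nibble of
 * the private-data header byte. The helper name below is hypothetical.
 */
static void example_ip_ver_packing(void)
{
	u8 ip_version = 0;

	/* cma_set_ip_ver(hdr, 4): set the high nibble, preserve the low one */
	ip_version = (4 << 4) | (ip_version & 0xF);	/* -> 0x40 */

	/* cma_get_ip_ver(hdr): recover the version from the high nibble */
	WARN_ON((ip_version >> 4) != 4);
}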
/* cma_src_addr() / cma_dst_addr() */
	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;

/* cma_igmp_send() */
			       *(__be32 *)(mgid->raw + 12));
			       *(__be32 *)(mgid->raw + 12));
	return (in_dev) ? 0 : -ENODEV;
/* compare_netdev_and_ip() */
				 &entry_b->id_list, struct rdma_id_private, id_list_entry);
	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
		return (ifindex_a > ifindex_b) ? 1 : -1;
	if (sa->sa_family != sb->sa_family)
		return sa->sa_family - sb->sa_family;
	if (sa->sa_family == AF_INET &&
		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
			      &((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
	if (sa->sa_family == AF_INET6 &&
		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				     &((struct sockaddr_in6 *)sb)->sin6_addr);
	return -1;

/* cma_add_id_to_tree() */
		return -ENOMEM;
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			new = &((*new)->rb_left);
			new = &((*new)->rb_right);
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);
	rb_link_node(&node->rb_node, parent, new);
	rb_insert_color(&node->rb_node, &id_table);

/* node_from_ndev_ip() */
	struct rb_node *node = root->rb_node;
			node = node->rb_left;
			node = node->rb_right;

/* cma_remove_id_from_tree() */
	if (list_empty(&id_priv->id_list_entry))
		id_priv->id.route.addr.dev_addr.bound_dev_if,
	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
/* _cma_attach_to_dev() */
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);
	trace_cm_id_attach(id_priv, cma_dev->device);

/* cma_attach_to_dev() */
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];

/* cma_release_dev() */
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;

/* cma_family() */
	return id_priv->id.route.addr.src_addr.ss_family;
/* cma_set_default_qkey() */
	switch (id_priv->id.ps) {
		id_priv->qkey = RDMA_UDP_QKEY;
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
			id_priv->qkey = be32_to_cpu(rec.qkey);

/* cma_set_qkey() */
	    (id_priv->qkey && (id_priv->qkey != qkey)))
		return -EINVAL;
	id_priv->qkey = qkey;

/* cma_translate_ib() */
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));

/* cma_translate_addr() */
	if (addr->sa_family != AF_IB) {
/* cma_validate_port() */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
	int bound_if_index = dev_addr->bound_dev_if;
	int dev_type = dev_addr->dev_type;
	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		ndev = rcu_dereference(sgid_attr->ndev);
		if (ndev->ifindex != bound_if_index) {
			pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
				if (ndev->ifindex == pdev->ifindex)
					bound_if_index = pdev->ifindex;
				if (bound_if_index == pdev->ifindex)
					bound_if_index = ndev->ifindex;
		if (!net_eq(dev_net(ndev), dev_addr->net) ||
		    ndev->ifindex != bound_if_index) {
			sgid_attr = ERR_PTR(-ENODEV);
		if (device->ops.driver_id == RDMA_DRIVER_RXE) {
			ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);

/* cma_bind_sgid_attr() */
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
/* cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret = -ENODEV;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
	memcpy(&gid, dev_addr->src_dev_addr +
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
				id_priv->id.port_num = port;

/* cma_ib_acquire_dev - Acquire cma device, port and SGID attribute */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		memcpy(&gid, dev_addr->src_dev_addr +
	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
	id_priv->id.port_num = req->port;
	 * of cma_dev->id_list such as cma_netdev_callback() and
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	rdma_restrack_add(&id_priv->res);
/* cma_iw_acquire_dev() */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret = -ENODEV;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	memcpy(&gid, dev_addr->src_dev_addr +
	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
		id_priv->id.port_num = port;
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
				id_priv->id.port_num = port;
	rdma_restrack_add(&id_priv->res);

/* cma_resolve_ib_dev() */
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
				ret = rdma_query_gid(cur_dev->device, p, i,
					id_priv->id.port_num = p;
					   dgid->global.subnet_prefix) &&
					id_priv->id.port_num = p;
	return -ENODEV;
	rdma_restrack_add(&id_priv->res);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);

/* cma_id_get() / cma_id_put() */
	refcount_inc(&id_priv->refcount);
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
/* __rdma_create_id() */
		return ERR_PTR(-ENOMEM);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->min_rnr_timer_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;
	INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);
	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
		rdma_restrack_parent_name(&id_priv->res, &parent->res);

/* __rdma_create_kernel_id() */
	rdma_restrack_set_name(&ret->res, caller);
	return &ret->id;

/* rdma_create_user_id() */
	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
	rdma_restrack_set_name(&ret->res, NULL);
	return &ret->id;
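/*
 * Editor's illustrative sketch (assumes <rdma/rdma_cm.h>): a kernel ULP
 * creates a CM ID with the rdma_create_id() wrapper, which funnels into
 * __rdma_create_id() above. The handler, port space, and QP type chosen
 * here are examples, not taken from the original file.
 */
static int example_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	return 0;	/* a real ULP dispatches on ev->event here */
}

static struct rdma_cm_id *example_create(void)
{
	struct rdma_cm_id *id;

	/* TCP port space, reliable-connected QP, init_net namespace. */
	id = rdma_create_id(&init_net, example_cm_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return id;

	/* ... bind/resolve/connect ...; rdma_destroy_id(id) when done. */
	return id;
}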
/* cma_init_ud_qp() / cma_init_conn_qp() */
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

/* rdma_create_qp() */
	if (id->device != pd->device) {
		ret = -EINVAL;
	qp_init_attr->port_num = id->port_num;
	if (id->qp_type == IB_QPT_UD)
	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
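/*
 * Editor's sketch of a caller: once a route is resolved, a ULP typically
 * fills struct ib_qp_init_attr and lets rdma_create_qp() create a QP bound
 * to the CM ID. The CQ plumbing and sizing values here are assumptions.
 */
static int example_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
			     struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {};

	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.cap.max_send_wr = 16;
	attr.cap.max_recv_wr = 16;
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;

	/* On success the CM has already moved the QP to INIT (see above). */
	return rdma_create_qp(id, pd, &attr);
}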
/* rdma_destroy_qp() */
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);

/* cma_modify_qp_rtr() */
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	mutex_unlock(&id_priv->qp_mutex);

/* cma_modify_qp_rts() */
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	mutex_unlock(&id_priv->qp_mutex);

/* cma_modify_qp_err() */
	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
	mutex_unlock(&id_priv->qp_mutex);

/* cma_ib_init_qp_attr() */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	qp_attr->port_num = id_priv->id.port_num;
	if (id_priv->id.qp_type == IB_QPT_UD) {
		qp_attr->qkey = id_priv->qkey;
		qp_attr->qp_access_flags = 0;

/* rdma_init_qp_attr() */
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
		ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
		qp_attr->port_num = id_priv->id.port_num;
		ret = -ENOSYS;
	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;
	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
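/*
 * Editor's sketch: a ULP that manages its own QP (instead of calling
 * rdma_create_qp()) can still ask the CM for the attributes of each state
 * transition via rdma_init_qp_attr() and apply them with ib_modify_qp(),
 * mirroring cma_modify_qp_rtr() above. Error handling is compressed.
 */
static int example_move_to_rtr(struct rdma_cm_id *id, struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTR };
	int mask, ret;

	ret = rdma_init_qp_attr(id, &attr, &mask);
	if (ret)
		return ret;
	return ib_modify_qp(qp, &attr, mask);
}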
/* cma_zero_addr() */
	switch (addr->sa_family) {
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);

/* cma_loopback_addr() */
	switch (addr->sa_family) {
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
			&((struct sockaddr_in6 *)addr)->sin6_addr);
			&((struct sockaddr_ib *)addr)->sib_addr);

/* cma_addr_cmp() */
	if (src->sa_family != dst->sa_family)
		return -1;
	switch (src->sa_family) {
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) :
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);

/* cma_port() */
	switch (addr->sa_family) {
		return ((struct sockaddr_in *) addr)->sin_port;
		return ((struct sockaddr_in6 *) addr)->sin6_port;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
/* cma_save_ib_info() */
	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
		ib->sib_family = AF_IB;
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
		ib->sib_family = AF_IB;
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);

/* cma_save_ip4_info() */
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,

/* cma_save_ip6_info() */
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,

/* cma_save_ip_info() */
	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;
		return -EAFNOSUPPORT;

/* cma_save_net_info() */
	if (ib_event->event == IB_CM_REQ_RECEIVED)
				 ib_event->param.req_rcvd.primary_path);
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
/* cma_save_req_info() */
		&ib_event->param.req_rcvd;
		&ib_event->param.sidr_req_rcvd;
	switch (ib_event->event) {
		req->device = req_param->listen_id->device;
		req->port = req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid = true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
				    req_param->bth_pkey, req->pkey);
		req->device = sidr_param->listen_id->device;
		req->port = sidr_param->port;
		req->has_gid = false;
		req->service_id = sidr_param->service_id;
		req->pkey = sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
				    sidr_param->bth_pkey, req->pkey);
		return -EINVAL;

/* validate_ipv4_net_dev() */
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	fl4.flowi4_oif = net_dev->ifindex;

/* validate_ipv6_net_dev() */
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
	ret = rt->rt6i_idev->dev == net_dev;

/* validate_net_dev() */
	switch (daddr->sa_family) {
		return saddr->sa_family == AF_INET &&
		return saddr->sa_family == AF_INET6 &&

/* roce_get_net_dev_by_cm_event() */
	if (ib_event->event == IB_CM_REQ_RECEIVED)
		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;

/* cma_get_net_dev() */
		(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
					   req->service_id);
	if (rdma_protocol_roce(req->device, req->port))
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
		return ERR_PTR(-ENODEV);
/* cma_match_private_data() */
	if (cma_any_addr(addr) && !id_priv->afonly)
	switch (addr->sa_family) {
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		    hdr->dst_addr.ip4.addr != ip4_addr)
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))

/* cma_protocol_roce() */
	struct ib_device *device = id->device;
	const u32 port_num = id->port_num ?: rdma_start_port(device);

/* cma_is_req_ipv6_ll() */
		(const struct sockaddr *)&req->listen_addr_storage;
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));

/* cma_match_net_dev() */
	const struct rdma_addr *addr = &id->route.addr;
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);
	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    (!!addr->dev_addr.bound_dev_if ==
	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))

/* cma_find_listener() */
		return ERR_PTR(-EINVAL);
	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req))
					    &id_priv->listen_list,
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id,
	return ERR_PTR(-EINVAL);

/* cma_ib_id_from_event() */
	if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
	if (((*net_dev)->flags & IFF_UP) == 0) {
		id_priv = ERR_PTR(-EHOSTUNREACH);
			      (struct sockaddr *)&req->src_addr_storage,
			      (struct sockaddr *)&req->listen_addr_storage)) {
		id_priv = ERR_PTR(-EHOSTUNREACH);
				   rdma_ps_from_service_id(req->service_id),
				   cma_port_from_service_id(req->service_id));
/* cma_cancel_route() */
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);

/* _cma_cancel_listens() */
	list_del_init(&id_priv->listen_any_item);
	while (!list_empty(&id_priv->listen_list)) {
			list_first_entry(&id_priv->listen_list,
		list_del_init(&dev_id_priv->device_item);
		list_del_init(&dev_id_priv->listen_item);
		rdma_destroy_id(&dev_id_priv->id);

/* cma_cancel_operation() */
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)

/* cma_release_port() */
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
/* destroy_mc() */
		       struct cma_multicast *mc)
	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
		ib_sa_free_multicast(mc->sa_mc);
	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
			&id_priv->id.route.addr.dev_addr;
		if (dev_addr->bound_dev_if)
			ndev = dev_get_by_index(dev_addr->net,
						dev_addr->bound_dev_if);
			gid_type = id_priv->cma_dev->default_gid_type
					   [id_priv->id.port_num -
					    rdma_start_port(
						id_priv->cma_dev->device)];
			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
		cancel_work_sync(&mc->iboe_join.work);
	kfree(mc);

/* cma_leave_mc_groups() */
	struct cma_multicast *mc;
	while (!list_empty(&id_priv->mc_list)) {
		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
		list_del(&mc->list);
		destroy_mc(id_priv, mc);
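/*
 * Editor's sketch (assumes <rdma/rdma_cm.h>): the mc_list torn down above
 * is populated by rdma_join_multicast(). A typical UD consumer joins after
 * address resolution and later leaves (or destroys the ID), which ends up
 * in destroy_mc(). The join state and NULL context here are examples.
 */
static int example_join_mcast(struct rdma_cm_id *id, struct sockaddr *mcaddr)
{
	int ret;

	/* Full-member join; completion arrives as RDMA_CM_EVENT_MULTICAST_JOIN. */
	ret = rdma_join_multicast(id, mcaddr, BIT(FULLMEMBER_JOIN), NULL);
	if (ret)
		return ret;

	/* ... use the group ...; then: */
	rdma_leave_multicast(id, mcaddr);
	return 0;
}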
/* _destroy_id() */
	rdma_restrack_del(&id_priv->res);
	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
	wait_for_completion(&id_priv->comp);
	if (id_priv->internal_id)
		cma_id_put(id_priv->id.context);
	kfree(id_priv->id.route.path_rec);
	kfree(id_priv->id.route.path_rec_inbound);
	kfree(id_priv->id.route.path_rec_outbound);
	put_net(id_priv->id.route.addr.dev_addr.net);

/* destroy_id_handler_unlock() */
	__releases(&id_priv->handler_mutex)
	lockdep_assert_held(&id_priv->handler_mutex);
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	id_priv->state = RDMA_CM_DESTROYING;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	mutex_unlock(&id_priv->handler_mutex);

/* rdma_destroy_id() */
	mutex_lock(&id_priv->handler_mutex);
/* cma_rep_recv() */
	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,

/* cma_set_rep_event_data() */
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
	event->ece.vendor_id = rep_data->ece.vendor_id;
	event->ece.attr_mod = rep_data->ece.attr_mod;

/* cma_cm_event_handler() */
	lockdep_assert_held(&id_priv->handler_mutex);
	ret = id_priv->id.event_handler(&id_priv->id, event);
/* cma_ib_handler() */
	struct rdma_id_private *id_priv = cm_id->context;
	mutex_lock(&id_priv->handler_mutex);
	state = READ_ONCE(id_priv->state);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	switch (ib_event->event) {
		event.status = -ETIMEDOUT;
		    (id_priv->id.qp_type != IB_QPT_UD)) {
		if (id_priv->id.qp) {
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		event.status = -ETIMEDOUT;
		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n",
				     rdma_reject_msg(&id_priv->id,
						     ib_event->param.rej_rcvd.reason));
		event.status = ib_event->param.rej_rcvd.reason;
		event.param.conn.private_data = ib_event->private_data;
			 ib_event->event);
	/* Destroy the CM ID by returning a non-zero value. */
	id_priv->cm_id.ib = NULL;
	mutex_unlock(&id_priv->handler_mutex);
/* cma_ib_new_conn_id() */
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
		ib_event->param.req_rcvd.primary_path->service_id;
	id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
				   listen_id->event_handler, listen_id->context,
				   listen_id->ps,
				   ib_event->param.req_rcvd.qp_type,
	id = &id_priv->id;
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
	rt = &id->route;
	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
				     sizeof(*rt->path_rec), GFP_KERNEL);
	if (!rt->path_rec)
	rt->path_rec[0] = *path;
	if (rt->num_pri_alt_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	id_priv->state = RDMA_CM_CONNECT;

/* cma_ib_new_udp_id() */
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct net *net = listen_id->route.addr.dev_addr.net;
	id_priv = __rdma_create_id(net, listen_id->event_handler,
				   listen_id->context, listen_id->ps, IB_QPT_UD,
	id = &id_priv->id;
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      ib_event->param.sidr_req_rcvd.service_id))
		rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
					 &id->route.addr.dev_addr);
	id_priv->state = RDMA_CM_CONNECT;
/* cma_set_req_event_data() */
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
	event->ece.vendor_id = req_data->ece.vendor_id;
	event->ece.attr_mod = req_data->ece.attr_mod;

/* cma_ib_check_req_qp_type() */
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
/* cma_ib_req_handler() */
	trace_cm_req_handler(listen_id, ib_event->event);
	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
		ret = -EINVAL;
	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = ib_event->private_data + offset;
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
		ret = -ENOMEM;
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;
		/* Destroy the CM ID by returning a non-zero value. */
		conn_id->cm_id.ib = NULL;
		mutex_unlock(&listen_id->handler_mutex);
	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
	    conn_id->id.qp_type != IB_QPT_UD) {
		trace_cm_send_mra(cm_id->context);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
/* rdma_get_service_id() */
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;
	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
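/*
 * Editor's worked example: for a non-AF_IB address the service ID is the
 * port space shifted left 16 bits plus the port number. With RDMA_PS_TCP
 * (0x0106) and port 5000 (0x1388) the result is 0x01061388 in network
 * byte order. The values are illustrative only.
 */
static __be64 example_service_id(void)
{
	u16 port = 5000;	/* 0x1388 */

	return cpu_to_be64(((u64)RDMA_PS_TCP << 16) + port);	/* 0x01061388 */
}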
/* rdma_read_gids() */
	struct rdma_addr *addr = &cm_id->route.addr;
	if (!cm_id->device) {
	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
		rdma_addr_get_sgid(&addr->dev_addr, sgid);
		rdma_addr_get_dgid(&addr->dev_addr, dgid);
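/*
 * Editor's sketch: rdma_read_gids() fills the source and/or destination
 * GID of a CM ID; for RoCE the GIDs are derived from the IP addresses as
 * seen above. The pr_info() formatting is an assumption.
 */
static void example_print_gids(struct rdma_cm_id *cm_id)
{
	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);
	pr_info("sgid %pI6 dgid %pI6\n", sgid.raw, dgid.raw);
}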
/* cma_iw_handler() */
	struct rdma_id_private *id_priv = iw_id->context;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
	switch (iw_event->event) {
		switch (iw_event->status) {
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
		case -ECONNRESET:
		case -ECONNREFUSED:
		case -ETIMEDOUT:
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
	mutex_unlock(&id_priv->handler_mutex);

/* iw_conn_req_handler() */
	int ret = -ECONNABORTED;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;
	listen_id = cm_id->context;
	mutex_lock(&listen_id->handler_mutex);
	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
				   listen_id->id.event_handler,
				   listen_id->id.context, RDMA_PS_TCP,
		ret = -ENOMEM;
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;
	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
		mutex_unlock(&listen_id->handler_mutex);
	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;
		conn_id->cm_id.iw = NULL;
		mutex_unlock(&listen_id->handler_mutex);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
/* cma_ib_listen() */
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	id = ib_cm_insert_listen(id_priv->id.device,
	id_priv->cm_id.ib = id;

/* cma_iw_listen() */
	id = iw_create_cm_id(id_priv->id.device,
	mutex_lock(&id_priv->qp_mutex);
	id->tos = id_priv->tos;
	id->tos_set = id_priv->tos_set;
	mutex_unlock(&id_priv->qp_mutex);
	id->afonly = id_priv->afonly;
	id_priv->cm_id.iw = id;
	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;

/* cma_listen_handler() */
	struct rdma_id_private *id_priv = id->context;
	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
		return -1;
	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
/* cma_listen_on_dev() */
	struct net *net = id_priv->id.route.addr.dev_addr.net;
	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
				   id_priv->id.ps, id_priv->id.qp_type, id_priv);
	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
	rdma_restrack_add(&dev_id_priv->res);
	dev_id_priv->internal_id = 1;
	dev_id_priv->afonly = id_priv->afonly;
	mutex_lock(&id_priv->qp_mutex);
	dev_id_priv->tos_set = id_priv->tos_set;
	dev_id_priv->tos = id_priv->tos;
	mutex_unlock(&id_priv->qp_mutex);
	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);

/* cma_listen_on_all() */
	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
		list_del_init(&to_destroy->device_item);
		rdma_destroy_id(&to_destroy->id);
/* rdma_set_service_type() */
	mutex_lock(&id_priv->qp_mutex);
	id_priv->tos = (u8) tos;
	id_priv->tos_set = true;
	mutex_unlock(&id_priv->qp_mutex);
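/*
 * Editor's sketch: rdma_set_service_type() stores a type-of-service value
 * that later shapes the path record (traffic class / SL selection). Call
 * it before route resolution; the ToS byte below is only an example.
 */
static void example_set_tos(struct rdma_cm_id *id)
{
	rdma_set_service_type(id, 0x18);	/* example ToS value */
}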
/* rdma_set_ack_timeout() - Set the ack timeout of QP associated */
	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;
	mutex_lock(&id_priv->qp_mutex);
	id_priv->timeout = timeout;
	id_priv->timeout_set = true;
	mutex_unlock(&id_priv->qp_mutex);
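/*
 * Editor's sketch: the timeout argument is the IBTA local-ACK-timeout
 * exponent (roughly 4.096 us * 2^timeout), applied when the CM later moves
 * the QP through its state transitions. The value 14 (~67 ms) is only an
 * example.
 */
static int example_set_ack_timeout(struct rdma_cm_id *id)
{
	return rdma_set_ack_timeout(id, 14);
}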
/*
 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
 */
	/* It is a five-bit value */
		return -EINVAL;
	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
		return -EINVAL;
	mutex_lock(&id_priv->qp_mutex);
	id_priv->min_rnr_timer = min_rnr_timer;
	id_priv->min_rnr_timer_set = true;
	mutex_unlock(&id_priv->qp_mutex);
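/*
 * Editor's sketch: the 5-bit value follows the IBTA "RNR NAK Timer Field"
 * encoding; IB_RNR_TIMER_000_64 (value 12, i.e. 0.64 ms) from
 * <rdma/ib_verbs.h> is used here purely as an example. This is a
 * responder-side (RC/XRC target) setting.
 */
static int example_set_min_rnr(struct rdma_cm_id *id)
{
	return rdma_set_min_rnr_timer(id, IB_RNR_TIMER_000_64);
}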
/* route_set_path_rec_inbound() */
	struct rdma_route *route = &work->id->id.route;
	if (!route->path_rec_inbound) {
		route->path_rec_inbound =
			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
		if (!route->path_rec_inbound)
			return -ENOMEM;
	*route->path_rec_inbound = *path_rec;

/* route_set_path_rec_outbound() */
	struct rdma_route *route = &work->id->id.route;
	if (!route->path_rec_outbound) {
		route->path_rec_outbound =
			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
		if (!route->path_rec_outbound)
			return -ENOMEM;
	*route->path_rec_outbound = *path_rec;

/* cma_query_handler() */
	route = &work->id->id.route;
			*route->path_rec = path_rec[i];
			status = -EINVAL;
		route->num_pri_alt_paths = 1;
		queue_work(cma_wq, &work->work);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
	work->event.status = status;
	queue_work(cma_wq, &work->work);
/* cma_query_ib_route() */
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
	path_rec.service_id = rdma_get_service_id(&id_priv->id,
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       work, &id_priv->query);
	return (id_priv->query_id < 0) ? id_priv->query_id : 0;

/* cma_iboe_join_work_handler() */
	struct cma_multicast *mc =
	struct rdma_cm_event *event = &mc->iboe_join.event;
	struct rdma_id_private *id_priv = mc->id_priv;
	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
	mutex_unlock(&id_priv->handler_mutex);
	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
/* cma_work_handler() */
	struct rdma_id_private *id_priv = work->id;
	mutex_lock(&id_priv->handler_mutex);
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
	if (work->old_state != 0 || work->new_state != 0) {
		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
	if (cma_cm_event_handler(id_priv, &work->event)) {
	mutex_unlock(&id_priv->handler_mutex);
	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);

/* cma_init_resolve_route_work() */
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

/* enqueue_resolve_addr_work() */
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
/* cma_resolve_ib_route() */
	struct rdma_route *route = &id_priv->id.route;
		return -ENOMEM;
	if (!route->path_rec)
		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
	kfree(route->path_rec);
	route->path_rec = NULL;

/* cma_iboe_set_path_rec_l2_fields() */
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	if (!addr->dev_addr.bound_dev_if)
	ndev = dev_get_by_index(addr->dev_addr.net,
				addr->dev_addr.bound_dev_if);
	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
						    id_priv->id.port_num);
	gid_type = cma_route_gid_type(addr->dev_addr.network,
				      id_priv->gid_type);
	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
	route->path_rec->roce.route_resolved = true;
	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
/* rdma_set_ib_path() */
		return -EINVAL;
	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
	if (!id->route.path_rec) {
		ret = -ENOMEM;
	if (rdma_protocol_roce(id->device, id->port_num)) {
			ret = -ENODEV;
	id->route.num_pri_alt_paths = 1;
	kfree(id->route.path_rec);
	id->route.path_rec = NULL;

/* cma_resolve_iw_route() */
		return -ENOMEM;
	queue_work(cma_wq, &work->work);

/* get_vlan_ndev_tc() */
	if (dev->num_tc)

/* get_lower_vlan_dev_tc() */
	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
	else if (dev->num_tc)
		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
		map->output_tc = 0;
	map->found = true;

/* iboe_tos_to_sl() */
	else if (ndev->num_tc)

/* cma_get_roce_udp_flow_label() */
	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3306 struct rdma_route *route = &id_priv->id.route; in cma_resolve_iboe_route()
3307 struct rdma_addr *addr = &route->addr; in cma_resolve_iboe_route()
3312 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - in cma_resolve_iboe_route()
3313 rdma_start_port(id_priv->cma_dev->device)]; in cma_resolve_iboe_route()
3316 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3317 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; in cma_resolve_iboe_route()
3318 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3322 return -ENOMEM; in cma_resolve_iboe_route()
3324 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); in cma_resolve_iboe_route()
3325 if (!route->path_rec) { in cma_resolve_iboe_route()
3326 ret = -ENOMEM; in cma_resolve_iboe_route()
3330 route->num_pri_alt_paths = 1; in cma_resolve_iboe_route()
3334 ret = -ENODEV; in cma_resolve_iboe_route()
3338 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_resolve_iboe_route()
3339 &route->path_rec->sgid); in cma_resolve_iboe_route()
3340 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, in cma_resolve_iboe_route()
3341 &route->path_rec->dgid); in cma_resolve_iboe_route()
3343 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) in cma_resolve_iboe_route()
3345 route->path_rec->hop_limit = addr->dev_addr.hoplimit; in cma_resolve_iboe_route()
3347 route->path_rec->hop_limit = 1; in cma_resolve_iboe_route()
3348 route->path_rec->reversible = 1; in cma_resolve_iboe_route()
3349 route->path_rec->pkey = cpu_to_be16(0xffff); in cma_resolve_iboe_route()
3350 route->path_rec->mtu_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3351 route->path_rec->sl = iboe_tos_to_sl(ndev, tos); in cma_resolve_iboe_route()
3352 route->path_rec->traffic_class = tos; in cma_resolve_iboe_route()
3353 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); in cma_resolve_iboe_route()
3354 route->path_rec->rate_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3355 route->path_rec->rate = IB_RATE_PORT_CURRENT; in cma_resolve_iboe_route()
3357 route->path_rec->packet_life_time_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3365 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3366 if (id_priv->timeout_set && id_priv->timeout) in cma_resolve_iboe_route()
3367 route->path_rec->packet_life_time = id_priv->timeout - 1; in cma_resolve_iboe_route()
3369 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; in cma_resolve_iboe_route()
3370 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
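/*
 * Editor's note (assumption from the IB spec encoding): packet life
 * time is an exponent, not a duration; lifetime = 4.096 us * 2^value.
 * The "timeout - 1" above is read here as compensating for the CM
 * deriving the local ACK timeout from roughly twice the path's packet
 * life time, so a user-set timeout of t still yields a 2^t-scaled wait.
 */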
3372 if (!route->path_rec->mtu) { in cma_resolve_iboe_route()
3373 ret = -EINVAL; in cma_resolve_iboe_route()
3377 if (rdma_protocol_roce_udp_encap(id_priv->id.device, in cma_resolve_iboe_route()
3378 id_priv->id.port_num)) in cma_resolve_iboe_route()
3379 route->path_rec->flow_label = in cma_resolve_iboe_route()
3383 queue_work(cma_wq, &work->work); in cma_resolve_iboe_route()
3388 kfree(route->path_rec); in cma_resolve_iboe_route()
3389 route->path_rec = NULL; in cma_resolve_iboe_route()
3390 route->num_pri_alt_paths = 0; in cma_resolve_iboe_route()
3402 return -EINVAL; in rdma_resolve_route()
3406 return -EINVAL; in rdma_resolve_route()
3409 if (rdma_cap_ib_sa(id->device, id->port_num)) in rdma_resolve_route()
3411 else if (rdma_protocol_roce(id->device, id->port_num)) { in rdma_resolve_route()
3416 else if (rdma_protocol_iwarp(id->device, id->port_num)) in rdma_resolve_route()
3419 ret = -ENOSYS; in rdma_resolve_route()
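/*
 * Editor's sketch of the matching userspace flow (librdmacm,
 * <rdma/rdma_cma.h>); illustrative only, with the event loop and error
 * paths trimmed. rdma_resolve_route() lands in the dispatcher above,
 * which picks the IB SA query, RoCE, or iWARP resolver.
 */
#include <rdma/rdma_cma.h>

static int example_resolve(struct rdma_cm_id *id, struct sockaddr *dst)
{
	int ret = rdma_resolve_addr(id, NULL, dst, 2000 /* ms */);

	if (ret)
		return ret;
	/* ... wait for RDMA_CM_EVENT_ADDR_RESOLVED on the event channel ... */
	return rdma_resolve_route(id, 2000 /* ms */);
}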
3434 switch (addr->sa_family) { in cma_set_loopback()
3436 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); in cma_set_loopback()
3439 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, in cma_set_loopback()
3443 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, in cma_set_loopback()
3462 !rdma_cap_ib_cm(cur_dev->device, 1)) in cma_bind_loopback()
3468 rdma_for_each_port (cur_dev->device, p) { in cma_bind_loopback()
3469 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && in cma_bind_loopback()
3478 ret = -ENODEV; in cma_bind_loopback()
3485 ret = rdma_query_gid(cma_dev->device, p, 0, &gid); in cma_bind_loopback()
3489 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); in cma_bind_loopback()
3493 id_priv->id.route.addr.dev_addr.dev_type = in cma_bind_loopback()
3494 (rdma_protocol_ib(cma_dev->device, p)) ? in cma_bind_loopback()
3497 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_bind_loopback()
3498 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); in cma_bind_loopback()
3499 id_priv->id.port_num = p; in cma_bind_loopback()
3501 rdma_restrack_add(&id_priv->res); in cma_bind_loopback()
3516 mutex_lock(&id_priv->handler_mutex); in addr_handler()
3529 if (!status && !id_priv->cma_dev) { in addr_handler()
3534 rdma_restrack_add(&id_priv->res); in addr_handler()
3555 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
3566 return -ENOMEM; in cma_resolve_loopback()
3568 if (!id_priv->cma_dev) { in cma_resolve_loopback()
3574 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3575 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3591 return -ENOMEM; in cma_resolve_ib_addr()
3593 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
3599 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) in cma_resolve_ib_addr()
3600 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); in cma_resolve_ib_addr()
3616 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_reuseaddr()
3617 if ((reuse && id_priv->state != RDMA_CM_LISTEN) || in rdma_set_reuseaddr()
3618 id_priv->state == RDMA_CM_IDLE) { in rdma_set_reuseaddr()
3619 id_priv->reuseaddr = reuse; in rdma_set_reuseaddr()
3622 ret = -EINVAL; in rdma_set_reuseaddr()
3624 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_reuseaddr()
3636 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_afonly()
3637 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { in rdma_set_afonly()
3638 id_priv->options |= (1 << CMA_OPTION_AFONLY); in rdma_set_afonly()
3639 id_priv->afonly = afonly; in rdma_set_afonly()
3642 ret = -EINVAL; in rdma_set_afonly()
3644 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_afonly()
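/*
 * Editor's sketch: userspace reaches rdma_set_reuseaddr() and
 * rdma_set_afonly() through librdmacm's rdma_set_option(); illustrative
 * only. Both options must be set before the id leaves the bindable
 * states checked above.
 */
#include <rdma/rdma_cma.h>

static int example_id_options(struct rdma_cm_id *id)
{
	int one = 1;

	if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
			    &one, sizeof(one)))
		return -1;
	return rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_AFONLY,
			       &one, sizeof(one));
}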
3654 u64 sid, mask; in cma_bind_port() local
3660 port = htons(bind_list->port); in cma_bind_port()
3662 switch (addr->sa_family) { in cma_bind_port()
3664 ((struct sockaddr_in *) addr)->sin_port = port; in cma_bind_port()
3667 ((struct sockaddr_in6 *) addr)->sin6_port = port; in cma_bind_port()
3671 sid = be64_to_cpu(sib->sib_sid); in cma_bind_port()
3672 mask = be64_to_cpu(sib->sib_sid_mask); in cma_bind_port()
3673 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); in cma_bind_port()
3674 sib->sib_sid_mask = cpu_to_be64(~0ULL); in cma_bind_port()
3677 id_priv->bind_list = bind_list; in cma_bind_port()
3678 hlist_add_head(&id_priv->node, &bind_list->owners); in cma_bind_port()
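/*
 * Editor's worked example (plain userspace C, not kernel code): the
 * AF_IB branch above keeps the masked-in upper service-ID bits and
 * folds the allocated port into the low 16 bits.
 */
#include <stdint.h>

static uint64_t example_fold_port(uint64_t sid, uint64_t mask, uint16_t port)
{
	/* sid 0x0000000001060000, mask 0xFFFFFFFFFFFF0000, port 4791
	 * -> 0x00000000010612B7 (4791 == 0x12B7) */
	return (sid & mask) | (uint64_t)port;
}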
3691 return -ENOMEM; in cma_alloc_port()
3693 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, in cma_alloc_port()
3698 bind_list->ps = ps; in cma_alloc_port()
3699 bind_list->port = snum; in cma_alloc_port()
3704 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; in cma_alloc_port()
3717 hlist_for_each_entry(cur_id, &bind_list->owners, node) { in cma_port_is_unique()
3725 /* different dest port -> unique */ in cma_port_is_unique()
3731 /* different src address -> unique */ in cma_port_is_unique()
3737 /* different dst address -> unique */ in cma_port_is_unique()
3743 return -EADDRNOTAVAIL; in cma_port_is_unique()
3754 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_alloc_any_port()
3759 remaining = (high - low) + 1; in cma_alloc_any_port()
3760 rover = get_random_u32_inclusive(low, remaining + low - 1); in cma_alloc_any_port()
3777 * re-using same port immediately after it is closed. in cma_alloc_any_port()
3781 if (ret != -EADDRNOTAVAIL) in cma_alloc_any_port()
3784 if (--remaining) { in cma_alloc_any_port()
3790 return -EADDRNOTAVAIL; in cma_alloc_any_port()
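/*
 * Editor's sketch of the search above: start at a random rover within
 * [low, high], walk the range once with wraparound, and keep the first
 * port that binds. try_bind() is a hypothetical stand-in for
 * cma_alloc_port(); the kernel additionally bails out early on errors
 * other than -EADDRNOTAVAIL.
 */
#include <stdlib.h>

static int example_pick_any_port(int low, int high, int (*try_bind)(int))
{
	int remaining = high - low + 1;
	int rover = low + rand() % remaining; /* kernel: get_random_u32_inclusive() */

	for (; remaining; remaining--) {
		if (try_bind(rover) == 0)
			return rover;
		if (++rover > high)
			rover = low;
	}
	return -1; /* kernel returns -EADDRNOTAVAIL */
}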
3808 hlist_for_each_entry(cur_id, &bind_list->owners, node) { in cma_check_port()
3812 if (reuseaddr && cur_id->reuseaddr) in cma_check_port()
3816 if (id_priv->afonly && cur_id->afonly && in cma_check_port()
3817 (addr->sa_family != cur_addr->sa_family)) in cma_check_port()
3821 return -EADDRNOTAVAIL; in cma_check_port()
3824 return -EADDRINUSE; in cma_check_port()
3840 return -EACCES; in cma_use_port()
3842 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); in cma_use_port()
3846 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); in cma_use_port()
3856 switch (id_priv->id.ps) { in cma_select_inet_ps()
3861 return id_priv->id.ps; in cma_select_inet_ps()
3873 u64 sid_ps, mask, sid; in cma_select_ib_ps() local
3876 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; in cma_select_ib_ps()
3877 sid = be64_to_cpu(sib->sib_sid) & mask; in cma_select_ib_ps()
3879 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { in cma_select_ib_ps()
3882 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && in cma_select_ib_ps()
3883 (sid == (RDMA_IB_IP_PS_TCP & mask))) { in cma_select_ib_ps()
3886 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && in cma_select_ib_ps()
3887 (sid == (RDMA_IB_IP_PS_UDP & mask))) { in cma_select_ib_ps()
3893 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); in cma_select_ib_ps()
3894 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | in cma_select_ib_ps()
3895 be64_to_cpu(sib->sib_sid_mask)); in cma_select_ib_ps()
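/*
 * Editor's note (constants assumed from include/rdma/rdma_cm.h): the
 * upper 48 bits of an AF_IB service ID select the port space, e.g.
 * RDMA_IB_IP_PS_TCP = 0x0000000001060000 and RDMA_IB_IP_PS_UDP =
 * 0x0000000001110000 (IP protocol numbers 6 and 17 shifted past the 16
 * port bits). A sid of 0x0000000001064321 under a full upper-48 mask
 * therefore maps to RDMA_PS_TCP with port 0x4321.
 */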
3910 return -EPROTONOSUPPORT; in cma_get_port()
3928 if (addr->sa_family != AF_INET6) in cma_check_linklocal()
3933 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) in cma_check_linklocal()
3936 if (!sin6->sin6_scope_id) in cma_check_linklocal()
3937 return -EINVAL; in cma_check_linklocal()
3939 dev_addr->bound_dev_if = sin6->sin6_scope_id; in cma_check_linklocal()
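/*
 * Editor's sketch (userspace, illustrative): a link-local IPv6 bind
 * must carry the interface in sin6_scope_id, which the check above
 * copies into bound_dev_if. "eth0" is a hypothetical interface name.
 */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <string.h>

static void example_linklocal_src(struct sockaddr_in6 *sin6)
{
	memset(sin6, 0, sizeof(*sin6));
	sin6->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "fe80::1", &sin6->sin6_addr);
	sin6->sin6_scope_id = if_nametoindex("eth0");
}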
3962 return -EINVAL; in rdma_listen()
3969 if (id_priv->reuseaddr) { in rdma_listen()
3971 ret = cma_check_port(id_priv->bind_list, id_priv, 0); in rdma_listen()
3973 id_priv->reuseaddr = 0; in rdma_listen()
3979 id_priv->backlog = backlog; in rdma_listen()
3980 if (id_priv->cma_dev) { in rdma_listen()
3981 if (rdma_cap_ib_cm(id->device, 1)) { in rdma_listen()
3985 } else if (rdma_cap_iw_cm(id->device, 1)) { in rdma_listen()
3990 ret = -ENOSYS; in rdma_listen()
4001 id_priv->backlog = 0; in rdma_listen()
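/*
 * Editor's sketch of the passive side via librdmacm (illustrative,
 * error handling trimmed): binding fixes the port space and port, then
 * rdma_listen() enters the entry point above.
 */
#include <rdma/rdma_cma.h>

static int example_listen(struct rdma_event_channel *ch,
			  struct sockaddr *src, struct rdma_cm_id **out)
{
	int ret = rdma_create_id(ch, out, NULL, RDMA_PS_TCP);

	if (ret)
		return ret;
	ret = rdma_bind_addr(*out, src);
	if (ret)
		return ret;
	return rdma_listen(*out, 8 /* backlog */);
}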
4017 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && in rdma_bind_addr_dst()
4018 addr->sa_family != AF_IB) in rdma_bind_addr_dst()
4019 return -EAFNOSUPPORT; in rdma_bind_addr_dst()
4022 return -EINVAL; in rdma_bind_addr_dst()
4024 ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); in rdma_bind_addr_dst()
4030 ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); in rdma_bind_addr_dst()
4039 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { in rdma_bind_addr_dst()
4040 if (addr->sa_family == AF_INET) in rdma_bind_addr_dst()
4041 id_priv->afonly = 1; in rdma_bind_addr_dst()
4043 else if (addr->sa_family == AF_INET6) { in rdma_bind_addr_dst()
4044 struct net *net = id_priv->id.route.addr.dev_addr.net; in rdma_bind_addr_dst()
4046 id_priv->afonly = net->ipv6.sysctl.bindv6only; in rdma_bind_addr_dst()
4053 id_daddr->sa_family = addr->sa_family; in rdma_bind_addr_dst()
4060 rdma_restrack_add(&id_priv->res); in rdma_bind_addr_dst()
4063 if (id_priv->cma_dev) in rdma_bind_addr_dst()
4077 if (src_addr && src_addr->sa_family) in cma_bind_addr()
4083 zero_sock.ss_family = dst_addr->sa_family; in cma_bind_addr()
4084 if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { in cma_bind_addr()
4090 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; in cma_bind_addr()
4091 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) in cma_bind_addr()
4092 id->route.addr.dev_addr.bound_dev_if = in cma_bind_addr()
4093 dst_addr6->sin6_scope_id; in cma_bind_addr()
4094 } else if (dst_addr->sa_family == AF_IB) { in cma_bind_addr()
4095 ((struct sockaddr_ib *)&zero_sock)->sib_pkey = in cma_bind_addr()
4096 ((struct sockaddr_ib *)dst_addr)->sib_pkey; in cma_bind_addr()
4104 * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
4115 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); in resolve_prepare_src()
4120 return -EINVAL; in resolve_prepare_src()
4126 if (cma_family(id_priv) != dst_addr->sa_family) { in resolve_prepare_src()
4127 ret = -EINVAL; in resolve_prepare_src()
4151 if (dst_addr->sa_family == AF_IB) { in rdma_resolve_addr()
4165 if (id_priv->used_resolve_ip) in rdma_resolve_addr()
4166 rdma_addr_cancel(&id->route.addr.dev_addr); in rdma_resolve_addr()
4168 id_priv->used_resolve_ip = 1; in rdma_resolve_addr()
4170 &id->route.addr.dev_addr, in rdma_resolve_addr()
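/*
 * Editor's note: used_resolve_ip appears to guard repeat calls; a
 * second rdma_resolve_addr() on the same id first cancels any
 * outstanding address-resolution work via rdma_addr_cancel() so the
 * old callback cannot race with the new request (editor's reading).
 */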
4199 cma_hdr->cma_version = CMA_VERSION; in cma_format_hdr()
4207 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; in cma_format_hdr()
4208 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; in cma_format_hdr()
4209 cma_hdr->port = src4->sin_port; in cma_format_hdr()
4217 cma_hdr->src_addr.ip6 = src6->sin6_addr; in cma_format_hdr()
4218 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; in cma_format_hdr()
4219 cma_hdr->port = src6->sin6_port; in cma_format_hdr()
4227 struct rdma_id_private *id_priv = cm_id->context; in cma_sidr_rep_handler()
4230 &ib_event->param.sidr_rep_rcvd; in cma_sidr_rep_handler()
4233 mutex_lock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
4234 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in cma_sidr_rep_handler()
4237 switch (ib_event->event) { in cma_sidr_rep_handler()
4240 event.status = -ETIMEDOUT; in cma_sidr_rep_handler()
4243 event.param.ud.private_data = ib_event->private_data; in cma_sidr_rep_handler()
4245 if (rep->status != IB_SIDR_SUCCESS) { in cma_sidr_rep_handler()
4247 event.status = ib_event->param.sidr_rep_rcvd.status; in cma_sidr_rep_handler()
4252 ret = cma_set_qkey(id_priv, rep->qkey); in cma_sidr_rep_handler()
4259 ib_init_ah_attr_from_path(id_priv->id.device, in cma_sidr_rep_handler()
4260 id_priv->id.port_num, in cma_sidr_rep_handler()
4261 id_priv->id.route.path_rec, in cma_sidr_rep_handler()
4263 rep->sgid_attr); in cma_sidr_rep_handler()
4264 event.param.ud.qp_num = rep->qpn; in cma_sidr_rep_handler()
4265 event.param.ud.qkey = rep->qkey; in cma_sidr_rep_handler()
4271 ib_event->event); in cma_sidr_rep_handler()
4279 /* Destroy the CM ID by returning a non-zero value. */ in cma_sidr_rep_handler()
4280 id_priv->cm_id.ib = NULL; in cma_sidr_rep_handler()
4285 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
4300 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) in cma_resolve_ib_udp()
4301 return -EINVAL; in cma_resolve_ib_udp()
4306 return -ENOMEM; in cma_resolve_ib_udp()
4311 if (conn_param->private_data && conn_param->private_data_len) in cma_resolve_ib_udp()
4312 memcpy(private_data + offset, conn_param->private_data, in cma_resolve_ib_udp()
4313 conn_param->private_data_len); in cma_resolve_ib_udp()
4322 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, in cma_resolve_ib_udp()
4328 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
4330 req.path = id_priv->id.route.path_rec; in cma_resolve_ib_udp()
4331 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_resolve_ib_udp()
4332 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_resolve_ib_udp()
4333 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); in cma_resolve_ib_udp()
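/* Editor's note: IB CM timeouts are encoded as 4.096 us << t, so
 * milliseconds ~= 1 << (t - 8); with CMA_CM_RESPONSE_TIMEOUT assumed
 * to be 20 (defined earlier in cma.c) this asks for ~4 seconds. */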
4337 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); in cma_resolve_ib_udp()
4339 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_resolve_ib_udp()
4340 id_priv->cm_id.ib = NULL; in cma_resolve_ib_udp()
4359 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) in cma_connect_ib()
4360 return -EINVAL; in cma_connect_ib()
4365 return -ENOMEM; in cma_connect_ib()
4370 if (conn_param->private_data && conn_param->private_data_len) in cma_connect_ib()
4371 memcpy(private_data + offset, conn_param->private_data, in cma_connect_ib()
4372 conn_param->private_data_len); in cma_connect_ib()
4374 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); in cma_connect_ib()
4379 id_priv->cm_id.ib = id; in cma_connect_ib()
4381 route = &id_priv->id.route; in cma_connect_ib()
4389 req.primary_path = &route->path_rec[0]; in cma_connect_ib()
4390 req.primary_path_inbound = route->path_rec_inbound; in cma_connect_ib()
4391 req.primary_path_outbound = route->path_rec_outbound; in cma_connect_ib()
4392 if (route->num_pri_alt_paths == 2) in cma_connect_ib()
4393 req.alternate_path = &route->path_rec[1]; in cma_connect_ib()
4395 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_connect_ib()
4397 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_connect_ib()
4398 req.qp_num = id_priv->qp_num; in cma_connect_ib()
4399 req.qp_type = id_priv->id.qp_type; in cma_connect_ib()
4400 req.starting_psn = id_priv->seq_num; in cma_connect_ib()
4401 req.responder_resources = conn_param->responder_resources; in cma_connect_ib()
4402 req.initiator_depth = conn_param->initiator_depth; in cma_connect_ib()
4403 req.flow_control = conn_param->flow_control; in cma_connect_ib()
4404 req.retry_count = min_t(u8, 7, conn_param->retry_count); in cma_connect_ib()
4405 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); in cma_connect_ib()
4409 req.srq = id_priv->srq ? 1 : 0; in cma_connect_ib()
4410 req.ece.vendor_id = id_priv->ece.vendor_id; in cma_connect_ib()
4411 req.ece.attr_mod = id_priv->ece.attr_mod; in cma_connect_ib()
4414 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); in cma_connect_ib()
4418 id_priv->cm_id.ib = NULL; in cma_connect_ib()
4432 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); in cma_connect_iw()
4436 mutex_lock(&id_priv->qp_mutex); in cma_connect_iw()
4437 cm_id->tos = id_priv->tos; in cma_connect_iw()
4438 cm_id->tos_set = id_priv->tos_set; in cma_connect_iw()
4439 mutex_unlock(&id_priv->qp_mutex); in cma_connect_iw()
4441 id_priv->cm_id.iw = cm_id; in cma_connect_iw()
4443 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), in cma_connect_iw()
4445 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), in cma_connect_iw()
4453 iw_param.ord = conn_param->initiator_depth; in cma_connect_iw()
4454 iw_param.ird = conn_param->responder_resources; in cma_connect_iw()
4455 iw_param.private_data = conn_param->private_data; in cma_connect_iw()
4456 iw_param.private_data_len = conn_param->private_data_len; in cma_connect_iw()
4457 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
4460 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
4466 id_priv->cm_id.iw = NULL; in cma_connect_iw()
4472 * rdma_connect_locked - Initiate an active connection request.
4487 return -EINVAL; in rdma_connect_locked()
4489 if (!id->qp) { in rdma_connect_locked()
4490 id_priv->qp_num = conn_param->qp_num; in rdma_connect_locked()
4491 id_priv->srq = conn_param->srq; in rdma_connect_locked()
4494 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_connect_locked()
4495 if (id->qp_type == IB_QPT_UD) in rdma_connect_locked()
4499 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_connect_locked()
4502 ret = -ENOSYS; in rdma_connect_locked()
4514 * rdma_connect - Initiate an active connection request.
4531 mutex_lock(&id_priv->handler_mutex); in rdma_connect()
4533 mutex_unlock(&id_priv->handler_mutex); in rdma_connect()
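/*
 * Editor's sketch of an active connect through librdmacm once address
 * and route resolution have completed (illustrative; a real caller
 * drives the event channel and waits for RDMA_CM_EVENT_ESTABLISHED).
 */
#include <rdma/rdma_cma.h>
#include <string.h>

static int example_connect(struct rdma_cm_id *id)
{
	struct rdma_conn_param param;

	memset(&param, 0, sizeof(param));
	param.responder_resources = 1;
	param.initiator_depth = 1;
	param.retry_count = 7;     /* cma_connect_ib() clamps both to 7 */
	param.rnr_retry_count = 7;
	return rdma_connect(id, &param);
}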
4539 * rdma_connect_ece - Initiate an active connection request with ECE data.
4552 id_priv->ece.vendor_id = ece->vendor_id; in rdma_connect_ece()
4553 id_priv->ece.attr_mod = ece->attr_mod; in rdma_connect_ece()
4574 rep.qp_num = id_priv->qp_num; in cma_accept_ib()
4575 rep.starting_psn = id_priv->seq_num; in cma_accept_ib()
4576 rep.private_data = conn_param->private_data; in cma_accept_ib()
4577 rep.private_data_len = conn_param->private_data_len; in cma_accept_ib()
4578 rep.responder_resources = conn_param->responder_resources; in cma_accept_ib()
4579 rep.initiator_depth = conn_param->initiator_depth; in cma_accept_ib()
4581 rep.flow_control = conn_param->flow_control; in cma_accept_ib()
4582 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); in cma_accept_ib()
4583 rep.srq = id_priv->srq ? 1 : 0; in cma_accept_ib()
4584 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_accept_ib()
4585 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_accept_ib()
4588 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); in cma_accept_ib()
4600 return -EINVAL; in cma_accept_iw()
4606 iw_param.ord = conn_param->initiator_depth; in cma_accept_iw()
4607 iw_param.ird = conn_param->responder_resources; in cma_accept_iw()
4608 iw_param.private_data = conn_param->private_data; in cma_accept_iw()
4609 iw_param.private_data_len = conn_param->private_data_len; in cma_accept_iw()
4610 if (id_priv->id.qp) in cma_accept_iw()
4611 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
4613 iw_param.qpn = conn_param->qp_num; in cma_accept_iw()
4615 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); in cma_accept_iw()
4634 rep.qp_num = id_priv->qp_num; in cma_send_sidr_rep()
4635 rep.qkey = id_priv->qkey; in cma_send_sidr_rep()
4637 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_send_sidr_rep()
4638 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_send_sidr_rep()
4645 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); in cma_send_sidr_rep()
4649 * rdma_accept - Called to accept a connection request or response.
4672 lockdep_assert_held(&id_priv->handler_mutex); in rdma_accept()
4674 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in rdma_accept()
4675 return -EINVAL; in rdma_accept()
4677 if (!id->qp && conn_param) { in rdma_accept()
4678 id_priv->qp_num = conn_param->qp_num; in rdma_accept()
4679 id_priv->srq = conn_param->srq; in rdma_accept()
4682 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_accept()
4683 if (id->qp_type == IB_QPT_UD) { in rdma_accept()
4686 conn_param->qkey, in rdma_accept()
4687 conn_param->private_data, in rdma_accept()
4688 conn_param->private_data_len); in rdma_accept()
4698 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_accept()
4701 ret = -ENOSYS; in rdma_accept()
4720 id_priv->ece.vendor_id = ece->vendor_id; in rdma_accept_ece()
4721 id_priv->ece.attr_mod = ece->attr_mod; in rdma_accept_ece()
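/*
 * Editor's sketch of the passive accept (librdmacm, illustrative): on
 * RDMA_CM_EVENT_CONNECT_REQUEST the listener accepts the child id,
 * typically echoing the requester's resource limits from the event.
 */
#include <rdma/rdma_cma.h>
#include <string.h>

static int example_accept(struct rdma_cm_event *ev)
{
	struct rdma_conn_param param;

	memset(&param, 0, sizeof(param));
	param.responder_resources = ev->param.conn.responder_resources;
	param.initiator_depth = ev->param.conn.initiator_depth;
	return rdma_accept(ev->id, &param);
}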
4732 mutex_lock(&id_priv->handler_mutex); in rdma_lock_handler()
4741 mutex_unlock(&id_priv->handler_mutex); in rdma_unlock_handler()
4751 if (!id_priv->cm_id.ib) in rdma_notify()
4752 return -EINVAL; in rdma_notify()
4754 switch (id->device->node_type) { in rdma_notify()
4756 ret = ib_cm_notify(id_priv->cm_id.ib, event); in rdma_notify()
4773 if (!id_priv->cm_id.ib) in rdma_reject()
4774 return -EINVAL; in rdma_reject()
4776 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_reject()
4777 if (id->qp_type == IB_QPT_UD) { in rdma_reject()
4782 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, in rdma_reject()
4785 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_reject()
4786 ret = iw_cm_reject(id_priv->cm_id.iw, in rdma_reject()
4789 ret = -ENOSYS; in rdma_reject()
4802 if (!id_priv->cm_id.ib) in rdma_disconnect()
4803 return -EINVAL; in rdma_disconnect()
4805 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_disconnect()
4811 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { in rdma_disconnect()
4812 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) in rdma_disconnect()
4817 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_disconnect()
4818 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); in rdma_disconnect()
4820 ret = -EINVAL; in rdma_disconnect()
4830 struct cma_multicast *mc) in cma_make_mc_event() argument
4840 event->status = status; in cma_make_mc_event()
4841 event->param.ud.private_data = mc->context; in cma_make_mc_event()
4843 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; in cma_make_mc_event()
4847 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_make_mc_event()
4848 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); in cma_make_mc_event()
4850 id_priv->cma_dev in cma_make_mc_event()
4851 ->default_gid_type[id_priv->id.port_num - in cma_make_mc_event()
4853 id_priv->cma_dev->device)]; in cma_make_mc_event()
4855 event->event = RDMA_CM_EVENT_MULTICAST_JOIN; in cma_make_mc_event()
4856 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, in cma_make_mc_event()
4857 &multicast->rec, ndev, gid_type, in cma_make_mc_event()
4858 &event->param.ud.ah_attr)) { in cma_make_mc_event()
4859 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; in cma_make_mc_event()
4863 event->param.ud.qp_num = 0xFFFFFF; in cma_make_mc_event()
4864 event->param.ud.qkey = id_priv->qkey; in cma_make_mc_event()
4872 struct cma_multicast *mc = multicast->context; in cma_ib_mc_handler() local
4873 struct rdma_id_private *id_priv = mc->id_priv; in cma_ib_mc_handler()
4877 mutex_lock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4878 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || in cma_ib_mc_handler()
4879 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) in cma_ib_mc_handler()
4882 ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); in cma_ib_mc_handler()
4884 cma_make_mc_event(status, id_priv, multicast, &event, mc); in cma_ib_mc_handler()
4891 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4899 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_set_mgid()
4905 } else if ((addr->sa_family == AF_INET6) && in cma_set_mgid()
4906 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == in cma_set_mgid()
4909 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); in cma_set_mgid()
4910 } else if (addr->sa_family == AF_IB) { in cma_set_mgid()
4911 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); in cma_set_mgid()
4912 } else if (addr->sa_family == AF_INET6) { in cma_set_mgid()
4913 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); in cma_set_mgid()
4914 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4918 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); in cma_set_mgid()
4919 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4926 struct cma_multicast *mc) in cma_join_ib_multicast() argument
4929 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_join_ib_multicast()
4934 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, in cma_join_ib_multicast()
4939 if (!id_priv->qkey) { in cma_join_ib_multicast()
4945 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); in cma_join_ib_multicast()
4946 rec.qkey = cpu_to_be32(id_priv->qkey); in cma_join_ib_multicast()
4949 rec.join_state = mc->join_state; in cma_join_ib_multicast()
4957 if (id_priv->id.ps == RDMA_PS_IPOIB) in cma_join_ib_multicast()
4964 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, in cma_join_ib_multicast()
4965 id_priv->id.port_num, &rec, comp_mask, in cma_join_ib_multicast()
4966 GFP_KERNEL, cma_ib_mc_handler, mc); in cma_join_ib_multicast()
4967 return PTR_ERR_OR_ZERO(mc->sa_mc); in cma_join_ib_multicast()
4978 } else if (addr->sa_family == AF_INET6) { in cma_iboe_set_mgid()
4979 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); in cma_iboe_set_mgid()
4981 mgid->raw[0] = in cma_iboe_set_mgid()
4983 mgid->raw[1] = in cma_iboe_set_mgid()
4985 mgid->raw[2] = 0; in cma_iboe_set_mgid()
4986 mgid->raw[3] = 0; in cma_iboe_set_mgid()
4987 mgid->raw[4] = 0; in cma_iboe_set_mgid()
4988 mgid->raw[5] = 0; in cma_iboe_set_mgid()
4989 mgid->raw[6] = 0; in cma_iboe_set_mgid()
4990 mgid->raw[7] = 0; in cma_iboe_set_mgid()
4991 mgid->raw[8] = 0; in cma_iboe_set_mgid()
4992 mgid->raw[9] = 0; in cma_iboe_set_mgid()
4993 mgid->raw[10] = 0xff; in cma_iboe_set_mgid()
4994 mgid->raw[11] = 0xff; in cma_iboe_set_mgid()
4995 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; in cma_iboe_set_mgid()
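/*
 * Editor's note (reading of the raw[] stores above; the values on the
 * elided lines are assumed): for IPv4 the MGID is built by hand with
 * bytes 10-11 set to ff:ff and bytes 12-15 carrying the IPv4 address,
 * i.e. the familiar ::ffff:a.b.c.d IPv4-mapped form; bytes 0-1 stay
 * zero for RoCE v2 UDP encap but become ff:0e (link-scope multicast)
 * for RoCE v1.
 */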
5000 struct cma_multicast *mc) in cma_iboe_join_multicast() argument
5002 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iboe_join_multicast()
5004 struct sockaddr *addr = (struct sockaddr *)&mc->addr; in cma_iboe_join_multicast()
5010 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); in cma_iboe_join_multicast()
5013 return -EINVAL; in cma_iboe_join_multicast()
5015 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_iboe_join_multicast()
5016 rdma_start_port(id_priv->cma_dev->device)]; in cma_iboe_join_multicast()
5020 if (dev_addr->bound_dev_if) in cma_iboe_join_multicast()
5021 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); in cma_iboe_join_multicast()
5023 return -ENODEV; in cma_iboe_join_multicast()
5027 ib.rec.mtu = iboe_get_mtu(ndev->mtu); in cma_iboe_join_multicast()
5029 if (addr->sa_family == AF_INET) { in cma_iboe_join_multicast()
5039 err = -ENOTSUPP; in cma_iboe_join_multicast()
5043 return err ?: -EINVAL; in cma_iboe_join_multicast()
5045 if (!id_priv->qkey) in cma_iboe_join_multicast()
5048 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_iboe_join_multicast()
5050 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); in cma_iboe_join_multicast()
5051 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); in cma_iboe_join_multicast()
5052 queue_work(cma_wq, &mc->iboe_join.work); in cma_iboe_join_multicast()
5061 struct cma_multicast *mc; in rdma_join_multicast() local
5065 if (WARN_ON(id->qp)) in rdma_join_multicast()
5066 return -EINVAL; in rdma_join_multicast()
5069 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && in rdma_join_multicast()
5070 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) in rdma_join_multicast()
5071 return -EINVAL; in rdma_join_multicast()
5073 if (id_priv->id.qp_type != IB_QPT_UD) in rdma_join_multicast()
5074 return -EINVAL; in rdma_join_multicast()
5076 mc = kzalloc(sizeof(*mc), GFP_KERNEL); in rdma_join_multicast()
5077 if (!mc) in rdma_join_multicast()
5078 return -ENOMEM; in rdma_join_multicast()
5080 memcpy(&mc->addr, addr, rdma_addr_size(addr)); in rdma_join_multicast()
5081 mc->context = context; in rdma_join_multicast()
5082 mc->id_priv = id_priv; in rdma_join_multicast()
5083 mc->join_state = join_state; in rdma_join_multicast()
5085 if (rdma_protocol_roce(id->device, id->port_num)) { in rdma_join_multicast()
5086 ret = cma_iboe_join_multicast(id_priv, mc); in rdma_join_multicast()
5089 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { in rdma_join_multicast()
5090 ret = cma_join_ib_multicast(id_priv, mc); in rdma_join_multicast()
5094 ret = -ENOSYS; in rdma_join_multicast()
5098 spin_lock(&id_priv->lock); in rdma_join_multicast()
5099 list_add(&mc->list, &id_priv->mc_list); in rdma_join_multicast()
5100 spin_unlock(&id_priv->lock); in rdma_join_multicast()
5104 kfree(mc); in rdma_join_multicast()
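/*
 * Editor's sketch (librdmacm, illustrative): multicast joins go through
 * rdma_join_multicast() on a UD id whose address is already bound or
 * resolved, matching the state check above.
 */
#include <rdma/rdma_cma.h>

static int example_join(struct rdma_cm_id *id, struct sockaddr *mcast)
{
	/* completion arrives as RDMA_CM_EVENT_MULTICAST_JOIN; the event's
	 * param.ud supplies the ah_attr, qp_num 0xFFFFFF and the qkey */
	return rdma_join_multicast(id, mcast, NULL /* user context */);
}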
5112 struct cma_multicast *mc; in rdma_leave_multicast() local
5115 spin_lock_irq(&id_priv->lock); in rdma_leave_multicast()
5116 list_for_each_entry(mc, &id_priv->mc_list, list) { in rdma_leave_multicast()
5117 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) in rdma_leave_multicast()
5119 list_del(&mc->list); in rdma_leave_multicast()
5120 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
5122 WARN_ON(id_priv->cma_dev->device != id->device); in rdma_leave_multicast()
5123 destroy_mc(id_priv, mc); in rdma_leave_multicast()
5126 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
5135 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_netdev_change()
5137 if ((dev_addr->bound_dev_if == ndev->ifindex) && in cma_netdev_change()
5138 (net_eq(dev_net(ndev), dev_addr->net)) && in cma_netdev_change()
5139 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { in cma_netdev_change()
5141 ndev->name, &id_priv->id); in cma_netdev_change()
5144 return -ENOMEM; in cma_netdev_change()
5146 INIT_WORK(&work->work, cma_work_handler); in cma_netdev_change()
5147 work->id = id_priv; in cma_netdev_change()
5148 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; in cma_netdev_change()
5150 queue_work(cma_wq, &work->work); in cma_netdev_change()
5172 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { in cma_netdev_callback()
5189 mutex_lock(&id_priv->handler_mutex); in cma_netevent_work_handler()
5191 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_netevent_work_handler()
5192 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_netevent_work_handler()
5196 event.status = -ETIMEDOUT; in cma_netevent_work_handler()
5199 __acquire(&id_priv->handler_mutex); in cma_netevent_work_handler()
5200 id_priv->cm_id.ib = NULL; in cma_netevent_work_handler()
5207 mutex_unlock(&id_priv->handler_mutex); in cma_netevent_work_handler()
5223 if (neigh->tbl->family == AF_INET6) { in cma_netevent_callback()
5227 neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; in cma_netevent_callback()
5228 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, in cma_netevent_callback()
5230 } else if (neigh->tbl->family == AF_INET) { in cma_netevent_callback()
5234 neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); in cma_netevent_callback()
5235 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, in cma_netevent_callback()
5243 list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { in cma_netevent_callback()
5244 if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, in cma_netevent_callback()
5245 neigh->ha, ETH_ALEN)) in cma_netevent_callback()
5248 queue_work(cma_wq, &current_id->id.net_work); in cma_netevent_callback()
5269 mutex_lock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5271 spin_lock_irqsave(&id_priv->lock, flags); in cma_send_device_removal_put()
5272 state = id_priv->state; in cma_send_device_removal_put()
5274 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
5275 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5279 id_priv->state = RDMA_CM_DEVICE_REMOVAL; in cma_send_device_removal_put()
5280 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
5288 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5293 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
5306 while (!list_empty(&cma_dev->id_list)) { in cma_process_remove()
5308 &cma_dev->id_list, struct rdma_id_private, device_item); in cma_process_remove()
5310 list_del_init(&id_priv->listen_item); in cma_process_remove()
5311 list_del_init(&id_priv->device_item); in cma_process_remove()
5322 wait_for_completion(&cma_dev->comp); in cma_process_remove()
5346 return -EOPNOTSUPP; in cma_add_one()
5350 return -ENOMEM; in cma_add_one()
5352 cma_dev->device = device; in cma_add_one()
5353 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, in cma_add_one()
5354 sizeof(*cma_dev->default_gid_type), in cma_add_one()
5356 if (!cma_dev->default_gid_type) { in cma_add_one()
5357 ret = -ENOMEM; in cma_add_one()
5361 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, in cma_add_one()
5362 sizeof(*cma_dev->default_roce_tos), in cma_add_one()
5364 if (!cma_dev->default_roce_tos) { in cma_add_one()
5365 ret = -ENOMEM; in cma_add_one()
5373 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5376 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5378 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; in cma_add_one()
5381 init_completion(&cma_dev->comp); in cma_add_one()
5382 refcount_set(&cma_dev->refcount, 1); in cma_add_one()
5383 INIT_LIST_HEAD(&cma_dev->id_list); in cma_add_one()
5387 list_add_tail(&cma_dev->list, &dev_list); in cma_add_one()
5399 list_del(&cma_dev->list); in cma_add_one()
5404 kfree(cma_dev->default_roce_tos); in cma_add_one()
5406 kfree(cma_dev->default_gid_type); in cma_add_one()
5420 list_del(&cma_dev->list); in cma_remove_one()
5424 kfree(cma_dev->default_roce_tos); in cma_remove_one()
5425 kfree(cma_dev->default_gid_type); in cma_remove_one()
5433 xa_init(&pernet->tcp_ps); in cma_init_net()
5434 xa_init(&pernet->udp_ps); in cma_init_net()
5435 xa_init(&pernet->ipoib_ps); in cma_init_net()
5436 xa_init(&pernet->ib_ps); in cma_init_net()
5445 WARN_ON(!xa_empty(&pernet->tcp_ps)); in cma_exit_net()
5446 WARN_ON(!xa_empty(&pernet->udp_ps)); in cma_exit_net()
5447 WARN_ON(!xa_empty(&pernet->ipoib_ps)); in cma_exit_net()
5448 WARN_ON(!xa_empty(&pernet->ib_ps)); in cma_exit_net()
5477 return -ENOMEM; in cma_init()