// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

static void cma_netevent_work_handler(struct work_struct *_work);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *                           request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct rb_root id_table = RB_ROOT;
/* Serialize operations of id_table tree */
static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}

struct id_table_entry {
	struct list_head id_list;
	struct rb_node rb_node;
};

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	refcount_t		refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head	owners;
	unsigned short		port;
};

static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

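/*
 * Lookups and removals below operate on the per-net-namespace port space
 * table selected by cma_pernet_xa(); entries are keyed by port number (snum).
 */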
static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	xa_erase(xa, snum);
}

enum {
	CMA_OPTION_AFONLY,
};

void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     u32 port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;

	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * to/from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
				 struct id_table_entry *entry_b)
{
	struct rdma_id_private *id_priv = list_first_entry(
		&entry_b->id_list, struct rdma_id_private, id_list_entry);
	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
	struct sockaddr *sb = cma_dst_addr(id_priv);

	if (ifindex_a != ifindex_b)
		return (ifindex_a > ifindex_b) ? 1 : -1;

	if (sa->sa_family != sb->sa_family)
		return sa->sa_family - sb->sa_family;

	if (sa->sa_family == AF_INET &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
			      &((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
	}

	if (sa->sa_family == AF_INET6 &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				     &((struct sockaddr_in6 *)sb)->sin6_addr);
	}

	return -1;
}

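/*
 * IDs that share the same (bound_dev_if, destination address) key are chained
 * on a single id_table_entry; the rb-tree keeps one node per key.
 */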
static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{
	struct rb_node **new, *parent = NULL;
	struct id_table_entry *this, *node;
	unsigned long flags;
	int result;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock_irqsave(&id_table_lock, flags);
	new = &id_table.rb_node;
	while (*new) {
		this = container_of(*new, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			cma_dst_addr(node_id_priv), this);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
			kfree(node);
			goto unlock;
		}
	}

	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);

	rb_link_node(&node->rb_node, parent, new);
	rb_insert_color(&node->rb_node, &id_table);

unlock:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return 0;
}

static struct id_table_entry *
node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{
	struct rb_node *node = root->rb_node;
	struct id_table_entry *data;
	int result;

	while (node) {
		data = container_of(node, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(ifindex, sa, data);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return data;
	}

	return NULL;
}

static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{
	struct id_table_entry *data;
	unsigned long flags;

	spin_lock_irqsave(&id_table_lock, flags);
	if (list_empty(&id_priv->id_list_entry))
		goto out;

	data = node_from_ndev_ip(&id_table,
				 id_priv->id.route.addr.dev_addr.bound_dev_if,
				 cma_dst_addr(id_priv));
	if (!data)
		goto out;

	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
		kfree(data);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);

	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	if (!qkey ||
	    (id_priv->qkey && (id_priv->qkey != qkey)))
		return -EINVAL;

	id_priv->qkey = qkey;
	return 0;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}

static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u32 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
	int bound_if_index = dev_addr->bound_dev_if;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;
	struct net_device *pdev = NULL;

	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		goto out;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		goto out;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		goto out;

	/*
	 * For drivers that do not associate more than one net device with
	 * their gid tables, such as iWARP drivers, it is sufficient to
	 * return the first table entry.
	 *
	 * Other driver classes might be included in the future.
	 */
	if (rdma_protocol_iwarp(device, port)) {
		sgid_attr = rdma_get_gid_attr(device, port, 0);
		if (IS_ERR(sgid_attr))
			goto out;

		rcu_read_lock();
		ndev = rcu_dereference(sgid_attr->ndev);
		if (ndev->ifindex != bound_if_index) {
			pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
			if (pdev) {
				if (is_vlan_dev(pdev)) {
					pdev = vlan_dev_real_dev(pdev);
					if (ndev->ifindex == pdev->ifindex)
						bound_if_index = pdev->ifindex;
				}
				if (is_vlan_dev(ndev)) {
					pdev = vlan_dev_real_dev(ndev);
					if (bound_if_index == pdev->ifindex)
						bound_if_index = ndev->ifindex;
				}
			}
		}
		if (!net_eq(dev_net(ndev), dev_addr->net) ||
		    ndev->ifindex != bound_if_index) {
			rdma_put_gid_attr(sgid_attr);
			sgid_attr = ERR_PTR(-ENODEV);
		}
		rcu_read_unlock();
		goto out;
	}

	/*
	 * For a RXE device, it should work with TUN device and normal ethernet
	 * devices. Use driver_id to check if a device is a RXE device or not.
	 * ARPHRD_NONE means a TUN device.
	 */
	if (device->ops.driver_id == RDMA_DRIVER_RXE) {
		if ((dev_type == ARPHRD_NONE || dev_type == ARPHRD_ETHER)
		    && rdma_protocol_roce(device, port)) {
			ndev = dev_get_by_index(dev_addr->net, bound_if_index);
			if (!ndev)
				goto out;
		}
	} else {
		if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
			ndev = dev_get_by_index(dev_addr->net, bound_if_index);
			if (!ndev)
				goto out;
		} else {
			gid_type = IB_GID_TYPE_IB;
		}
	}

	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	dev_put(ndev);
out:
	return sgid_attr;
}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
	return 0;
}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	union ib_gid gid;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);

	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}

out:
	if (!ret) {
		cma_attach_to_dev(id_priv, cma_dev);
		rdma_restrack_add(&id_priv->res);
	}

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int ret;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;

			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
			     ++i) {
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
				if (ret)
					continue;

				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
			}
		}
	}
	mutex_unlock(&lock);
	return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static struct rdma_id_private *
__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
		 void *context, enum rdma_ucm_port_space ps,
		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->min_rnr_timer_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;
	INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler);

	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
		rdma_restrack_parent_name(&id_priv->res, &parent->res);

	return id_priv;
}

struct rdma_cm_id *
__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
			void *context, enum rdma_ucm_port_space ps,
			enum ib_qp_type qp_type, const char *caller)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, caller);
	return &ret->id;
}
EXPORT_SYMBOL(__rdma_create_kernel_id);

struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
				       void *context,
				       enum rdma_ucm_port_space ps,
				       enum ib_qp_type qp_type)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
			       ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, NULL);
	return &ret->id;
}
EXPORT_SYMBOL(rdma_create_user_id);

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device) {
		ret = -EINVAL;
		goto out_err;
	}

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out_err;
	}

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto out_destroy;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
	return 0;
out_destroy:
	ib_destroy_qp(qp);
out_err:
	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	trace_cm_qp_destroy(id_priv);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

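/*
 * The helpers below walk an attached QP through INIT/RTR/RTS (or move it to
 * the error state); they are no-ops when the rdma_cm_id has no QP.
 */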
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_default_qkey(id_priv);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		      int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
	} else {
		ret = -ENOSYS;
	}

	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
		qp_attr->timeout = id_priv->timeout;

	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline bool cma_zero_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_loopback_addr(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(
			((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *)addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(
			&((struct sockaddr_ib *)addr)->sib_addr);
	default:
		return false;
	}
}

static inline bool cma_any_addr(const struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
	case AF_INET6: {
		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
		bool link_local;

		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
				  &dst_addr6->sin6_addr))
			return 1;
		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
			     IPV6_ADDR_LINKLOCAL;
		/* Link local must match their scope_ids */
		return link_local ? (src_addr6->sin6_scope_id !=
				     dst_addr6->sin6_scope_id) :
				    0;
	}

	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

static __be16 cma_port(const struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

static inline int cma_any_port(const struct sockaddr *addr)
{
	return !cma_port(addr);
}

static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	if (src_addr) {
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
		} else {
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		}
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	}
	if (dst_addr) {
		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
		if (path) {
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);
		}
	}
}

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in) {
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,
		};
	}
}

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
			      __be16 local_port)
{
	if (src_addr) {
		*src_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		};
	}

	if (dst_addr) {
		*dst_addr = (struct sockaddr_in6) {
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		};
	}
}

static u16 cma_port_from_service_id(__be64 service_id)
{
	return (u16)be64_to_cpu(service_id);
}

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct ib_cm_event *ib_event,
			    __be64 service_id)
{
	struct cma_hdr *hdr;
	__be16 port;

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		break;
	case 6:
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		break;
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}

static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     const struct rdma_cm_id *listen_id,
			     const struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
{
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
		return 0;
	}

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
}

static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
{
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device = req_param->listen_id->device;
		req->port = req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid = true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device = sidr_param->listen_id->device;
		req->port = sidr_param->port;
		req->has_gid = false;
		req->service_id = sidr_param->service_id;
		req->pkey = sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
{
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct fib_result res;
	struct flowi4 fl4;
	int err;
	bool ret;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))
		return false;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = net_dev->ifindex;
	fl4.daddr = daddr;
	fl4.saddr = saddr;

	rcu_read_lock();
	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
	rcu_read_unlock();

	return ret;
}

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
			   IPV6_ADDR_LINKLOCAL;
	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
					 &src_addr->sin6_addr, net_dev->ifindex,
					 NULL, strict);
	bool ret;

	if (!rt)
		return false;

	ret = rt->rt6i_idev->dev == net_dev;
	ip6_rt_put(rt);

	return ret;
#else
	return false;
#endif
}

static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
{
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
	case AF_INET:
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);

	case AF_INET6:
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

	default:
		return false;
	}
}

static struct net_device *
roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
{
	const struct ib_gid_attr *sgid_attr = NULL;
	struct net_device *ndev;

	if (ib_event->event == IB_CM_REQ_RECEIVED)
		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;

	if (!sgid_attr)
		return NULL;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
	if (IS_ERR(ndev))
		ndev = NULL;
	else
		dev_hold(ndev);
	rcu_read_unlock();
	return ndev;
}

static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
					  struct cma_req_info *req)
{
	struct sockaddr *listen_addr =
		(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
			       req->service_id);
	if (err)
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port))
		net_dev = roce_get_net_dev_by_cm_event(ib_event);
	else
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
						   req->pkey,
						   gid, listen_addr);
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	return net_dev;
}

static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
{
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
}

static bool cma_match_private_data(struct rdma_id_private *id_priv,
				   const struct cma_hdr *hdr)
{
	struct sockaddr *addr = cma_src_addr(id_priv);
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)
		return true;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
			return false;
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
			return false;
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
			return false;
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
			return false;
		break;
	case AF_IB:
		return true;
	default:
		return false;
	}

	return true;
}

static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
	struct ib_device *device = id->device;
	const u32 port_num = id->port_num ?: rdma_start_port(device);

	return rdma_protocol_roce(device, port_num);
}

static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
{
	const struct sockaddr *daddr =
		(const struct sockaddr *)&req->listen_addr_storage;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;

	/* Returns true if the req is for IPv6 link local */
	return (daddr->sa_family == AF_INET6 &&
		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
}

static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
			      const struct cma_req_info *req)
{
	const struct rdma_addr *addr = &id->route.addr;

	if (!net_dev)
		/* This request is an AF_IB request */
		return (!id->port_num || id->port_num == req->port) &&
		       (addr->src_addr.ss_family == AF_IB);

	/*
	 * If the request is not for IPv6 link local, allow matching
	 * request to any netdevice of the one or multiport rdma device.
	 */
	if (!cma_is_req_ipv6_ll(req))
		return true;
	/*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
	 */
	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
	    (!!addr->dev_addr.bound_dev_if ==
	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
		return true;
	else
		return false;
}

static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
{
	struct rdma_id_private *id_priv, *id_priv_dev;

	lockdep_assert_held(&lock);

	if (!bind_list)
		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req))
				return id_priv;
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
					    listen_item) {
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id,
						      net_dev, req))
					return id_priv_dev;
			}
		}
	}

	return ERR_PTR(-EINVAL);
}

static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
		     const struct ib_cm_event *ib_event,
		     struct cma_req_info *req,
		     struct net_device **net_dev)
{
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;
	int err;

	err = cma_save_req_info(ib_event, req);
	if (err)
		return ERR_PTR(err);

	*net_dev = cma_get_net_dev(ib_event, req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			*net_dev = NULL;
		} else {
			return ERR_CAST(*net_dev);
		}
	}

	mutex_lock(&lock);
	/*
	 * Net namespace might be getting deleted while route lookup,
	 * cm_id lookup is in progress. Therefore, perform netdevice
	 * validation, cm_id lookup under rcu lock.
	 * RCU lock along with netdevice state check, synchronizes with
	 * netdevice migrating to different net namespace and also avoids
	 * case where net namespace doesn't get deleted while lookup is in
	 * progress.
	 * If the device state is not IFF_UP, its properties such as ifindex
	 * and nd_net cannot be trusted to remain valid without rcu lock.
	 * net/core/dev.c change_net_namespace() ensures to synchronize with
	 * ongoing operations on net device after device is closed using
	 * synchronize_net().
	 */
	rcu_read_lock();
	if (*net_dev) {
		/*
		 * If netdevice is down, it is likely that it is administratively
		 * down or it might be migrating to different namespace.
		 * In that case avoid further processing, as the net namespace
		 * or ifindex may change.
		 */
		if (((*net_dev)->flags & IFF_UP) == 0) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}

		if (!validate_net_dev(*net_dev,
				      (struct sockaddr *)&req->src_addr_storage,
				      (struct sockaddr *)&req->listen_addr_storage)) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req->service_id),
				cma_port_from_service_id(req->service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
	rcu_read_unlock();
	mutex_unlock(&lock);
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}
	return id_priv;
}

static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
	}
}

static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	lockdep_assert_held(&lock);

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	list_del_init(&id_priv->listen_any_item);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv =
			list_first_entry(&id_priv->listen_list,
					 struct rdma_id_private, listen_item);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->device_item);
		list_del_init(&dev_id_priv->listen_item);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	_cma_cancel_listens(id_priv);
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		/*
		 * We can avoid doing the rdma_addr_cancel() based on state,
		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
		 * Notice that the addr_handler work could still be exiting
		 * outside this state, however due to the interaction with the
		 * handler_mutex the work is guaranteed not to touch id_priv
		 * during exit.
1985 */ 1986 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1987 break; 1988 case RDMA_CM_ROUTE_QUERY: 1989 cma_cancel_route(id_priv); 1990 break; 1991 case RDMA_CM_LISTEN: 1992 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) 1993 cma_cancel_listens(id_priv); 1994 break; 1995 default: 1996 break; 1997 } 1998 } 1999 2000 static void cma_release_port(struct rdma_id_private *id_priv) 2001 { 2002 struct rdma_bind_list *bind_list = id_priv->bind_list; 2003 struct net *net = id_priv->id.route.addr.dev_addr.net; 2004 2005 if (!bind_list) 2006 return; 2007 2008 mutex_lock(&lock); 2009 hlist_del(&id_priv->node); 2010 if (hlist_empty(&bind_list->owners)) { 2011 cma_ps_remove(net, bind_list->ps, bind_list->port); 2012 kfree(bind_list); 2013 } 2014 mutex_unlock(&lock); 2015 } 2016 2017 static void destroy_mc(struct rdma_id_private *id_priv, 2018 struct cma_multicast *mc) 2019 { 2020 bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 2021 2022 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) 2023 ib_sa_free_multicast(mc->sa_mc); 2024 2025 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { 2026 struct rdma_dev_addr *dev_addr = 2027 &id_priv->id.route.addr.dev_addr; 2028 struct net_device *ndev = NULL; 2029 2030 if (dev_addr->bound_dev_if) 2031 ndev = dev_get_by_index(dev_addr->net, 2032 dev_addr->bound_dev_if); 2033 if (ndev && !send_only) { 2034 enum ib_gid_type gid_type; 2035 union ib_gid mgid; 2036 2037 gid_type = id_priv->cma_dev->default_gid_type 2038 [id_priv->id.port_num - 2039 rdma_start_port( 2040 id_priv->cma_dev->device)]; 2041 cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, 2042 gid_type); 2043 cma_igmp_send(ndev, &mgid, false); 2044 } 2045 dev_put(ndev); 2046 2047 cancel_work_sync(&mc->iboe_join.work); 2048 } 2049 kfree(mc); 2050 } 2051 2052 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) 2053 { 2054 struct cma_multicast *mc; 2055 2056 while (!list_empty(&id_priv->mc_list)) { 2057 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, 2058 list); 2059 list_del(&mc->list); 2060 destroy_mc(id_priv, mc); 2061 } 2062 } 2063 2064 static void _destroy_id(struct rdma_id_private *id_priv, 2065 enum rdma_cm_state state) 2066 { 2067 cma_cancel_operation(id_priv, state); 2068 2069 rdma_restrack_del(&id_priv->res); 2070 cma_remove_id_from_tree(id_priv); 2071 if (id_priv->cma_dev) { 2072 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { 2073 if (id_priv->cm_id.ib) 2074 ib_destroy_cm_id(id_priv->cm_id.ib); 2075 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { 2076 if (id_priv->cm_id.iw) 2077 iw_destroy_cm_id(id_priv->cm_id.iw); 2078 } 2079 cma_leave_mc_groups(id_priv); 2080 cma_release_dev(id_priv); 2081 } 2082 2083 cma_release_port(id_priv); 2084 cma_id_put(id_priv); 2085 wait_for_completion(&id_priv->comp); 2086 2087 if (id_priv->internal_id) 2088 cma_id_put(id_priv->id.context); 2089 2090 kfree(id_priv->id.route.path_rec); 2091 kfree(id_priv->id.route.path_rec_inbound); 2092 kfree(id_priv->id.route.path_rec_outbound); 2093 2094 put_net(id_priv->id.route.addr.dev_addr.net); 2095 kfree(id_priv); 2096 } 2097 2098 /* 2099 * destroy an ID from within the handler_mutex. This ensures that no other 2100 * handlers can start running concurrently. 
2101 */ 2102 static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) 2103 __releases(&id_priv->handler_mutex) 2104 { 2105 enum rdma_cm_state state; 2106 unsigned long flags; 2107 2108 trace_cm_id_destroy(id_priv); 2109 2110 /* 2111 * Setting the state to destroyed under the handler mutex provides a 2112 * fence against calling handler callbacks. If this is invoked due to 2113 * the failure of a handler callback then it guarantees that no future 2114 * handlers will be called. 2115 */ 2116 lockdep_assert_held(&id_priv->handler_mutex); 2117 spin_lock_irqsave(&id_priv->lock, flags); 2118 state = id_priv->state; 2119 id_priv->state = RDMA_CM_DESTROYING; 2120 spin_unlock_irqrestore(&id_priv->lock, flags); 2121 mutex_unlock(&id_priv->handler_mutex); 2122 _destroy_id(id_priv, state); 2123 } 2124 2125 void rdma_destroy_id(struct rdma_cm_id *id) 2126 { 2127 struct rdma_id_private *id_priv = 2128 container_of(id, struct rdma_id_private, id); 2129 2130 mutex_lock(&id_priv->handler_mutex); 2131 destroy_id_handler_unlock(id_priv); 2132 } 2133 EXPORT_SYMBOL(rdma_destroy_id); 2134 2135 static int cma_rep_recv(struct rdma_id_private *id_priv) 2136 { 2137 int ret; 2138 2139 ret = cma_modify_qp_rtr(id_priv, NULL); 2140 if (ret) 2141 goto reject; 2142 2143 ret = cma_modify_qp_rts(id_priv, NULL); 2144 if (ret) 2145 goto reject; 2146 2147 trace_cm_send_rtu(id_priv); 2148 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 2149 if (ret) 2150 goto reject; 2151 2152 return 0; 2153 reject: 2154 pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret); 2155 cma_modify_qp_err(id_priv); 2156 trace_cm_send_rej(id_priv); 2157 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 2158 NULL, 0, NULL, 0); 2159 return ret; 2160 } 2161 2162 static void cma_set_rep_event_data(struct rdma_cm_event *event, 2163 const struct ib_cm_rep_event_param *rep_data, 2164 void *private_data) 2165 { 2166 event->param.conn.private_data = private_data; 2167 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 2168 event->param.conn.responder_resources = rep_data->responder_resources; 2169 event->param.conn.initiator_depth = rep_data->initiator_depth; 2170 event->param.conn.flow_control = rep_data->flow_control; 2171 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 2172 event->param.conn.srq = rep_data->srq; 2173 event->param.conn.qp_num = rep_data->remote_qpn; 2174 2175 event->ece.vendor_id = rep_data->ece.vendor_id; 2176 event->ece.attr_mod = rep_data->ece.attr_mod; 2177 } 2178 2179 static int cma_cm_event_handler(struct rdma_id_private *id_priv, 2180 struct rdma_cm_event *event) 2181 { 2182 int ret; 2183 2184 lockdep_assert_held(&id_priv->handler_mutex); 2185 2186 trace_cm_event_handler(id_priv, event); 2187 ret = id_priv->id.event_handler(&id_priv->id, event); 2188 trace_cm_event_done(id_priv, event, ret); 2189 return ret; 2190 } 2191 2192 static int cma_ib_handler(struct ib_cm_id *cm_id, 2193 const struct ib_cm_event *ib_event) 2194 { 2195 struct rdma_id_private *id_priv = cm_id->context; 2196 struct rdma_cm_event event = {}; 2197 enum rdma_cm_state state; 2198 int ret; 2199 2200 mutex_lock(&id_priv->handler_mutex); 2201 state = READ_ONCE(id_priv->state); 2202 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 2203 state != RDMA_CM_CONNECT) || 2204 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 2205 state != RDMA_CM_DISCONNECT)) 2206 goto out; 2207 2208 switch (ib_event->event) { 2209 case IB_CM_REQ_ERROR: 2210 case IB_CM_REP_ERROR: 2211 event.event =
RDMA_CM_EVENT_UNREACHABLE; 2212 event.status = -ETIMEDOUT; 2213 break; 2214 case IB_CM_REP_RECEIVED: 2215 if (state == RDMA_CM_CONNECT && 2216 (id_priv->id.qp_type != IB_QPT_UD)) { 2217 trace_cm_send_mra(id_priv); 2218 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2219 } 2220 if (id_priv->id.qp) { 2221 event.status = cma_rep_recv(id_priv); 2222 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 2223 RDMA_CM_EVENT_ESTABLISHED; 2224 } else { 2225 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 2226 } 2227 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 2228 ib_event->private_data); 2229 break; 2230 case IB_CM_RTU_RECEIVED: 2231 case IB_CM_USER_ESTABLISHED: 2232 event.event = RDMA_CM_EVENT_ESTABLISHED; 2233 break; 2234 case IB_CM_DREQ_ERROR: 2235 event.status = -ETIMEDOUT; 2236 fallthrough; 2237 case IB_CM_DREQ_RECEIVED: 2238 case IB_CM_DREP_RECEIVED: 2239 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 2240 RDMA_CM_DISCONNECT)) 2241 goto out; 2242 event.event = RDMA_CM_EVENT_DISCONNECTED; 2243 break; 2244 case IB_CM_TIMEWAIT_EXIT: 2245 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; 2246 break; 2247 case IB_CM_MRA_RECEIVED: 2248 /* ignore event */ 2249 goto out; 2250 case IB_CM_REJ_RECEIVED: 2251 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, 2252 ib_event->param.rej_rcvd.reason)); 2253 cma_modify_qp_err(id_priv); 2254 event.status = ib_event->param.rej_rcvd.reason; 2255 event.event = RDMA_CM_EVENT_REJECTED; 2256 event.param.conn.private_data = ib_event->private_data; 2257 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 2258 break; 2259 default: 2260 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 2261 ib_event->event); 2262 goto out; 2263 } 2264 2265 ret = cma_cm_event_handler(id_priv, &event); 2266 if (ret) { 2267 /* Destroy the CM ID by returning a non-zero value. */ 2268 id_priv->cm_id.ib = NULL; 2269 destroy_id_handler_unlock(id_priv); 2270 return ret; 2271 } 2272 out: 2273 mutex_unlock(&id_priv->handler_mutex); 2274 return 0; 2275 } 2276 2277 static struct rdma_id_private * 2278 cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, 2279 const struct ib_cm_event *ib_event, 2280 struct net_device *net_dev) 2281 { 2282 struct rdma_id_private *listen_id_priv; 2283 struct rdma_id_private *id_priv; 2284 struct rdma_cm_id *id; 2285 struct rdma_route *rt; 2286 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2287 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 2288 const __be64 service_id = 2289 ib_event->param.req_rcvd.primary_path->service_id; 2290 int ret; 2291 2292 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2293 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, 2294 listen_id->event_handler, listen_id->context, 2295 listen_id->ps, 2296 ib_event->param.req_rcvd.qp_type, 2297 listen_id_priv); 2298 if (IS_ERR(id_priv)) 2299 return NULL; 2300 2301 id = &id_priv->id; 2302 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2303 (struct sockaddr *)&id->route.addr.dst_addr, 2304 listen_id, ib_event, ss_family, service_id)) 2305 goto err; 2306 2307 rt = &id->route; 2308 rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 
2 : 1; 2309 rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, 2310 sizeof(*rt->path_rec), GFP_KERNEL); 2311 if (!rt->path_rec) 2312 goto err; 2313 2314 rt->path_rec[0] = *path; 2315 if (rt->num_pri_alt_paths == 2) 2316 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; 2317 2318 if (net_dev) { 2319 rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); 2320 } else { 2321 if (!cma_protocol_roce(listen_id) && 2322 cma_any_addr(cma_src_addr(id_priv))) { 2323 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; 2324 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); 2325 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); 2326 } else if (!cma_any_addr(cma_src_addr(id_priv))) { 2327 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); 2328 if (ret) 2329 goto err; 2330 } 2331 } 2332 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 2333 2334 id_priv->state = RDMA_CM_CONNECT; 2335 return id_priv; 2336 2337 err: 2338 rdma_destroy_id(id); 2339 return NULL; 2340 } 2341 2342 static struct rdma_id_private * 2343 cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, 2344 const struct ib_cm_event *ib_event, 2345 struct net_device *net_dev) 2346 { 2347 const struct rdma_id_private *listen_id_priv; 2348 struct rdma_id_private *id_priv; 2349 struct rdma_cm_id *id; 2350 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 2351 struct net *net = listen_id->route.addr.dev_addr.net; 2352 int ret; 2353 2354 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2355 id_priv = __rdma_create_id(net, listen_id->event_handler, 2356 listen_id->context, listen_id->ps, IB_QPT_UD, 2357 listen_id_priv); 2358 if (IS_ERR(id_priv)) 2359 return NULL; 2360 2361 id = &id_priv->id; 2362 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2363 (struct sockaddr *)&id->route.addr.dst_addr, 2364 listen_id, ib_event, ss_family, 2365 ib_event->param.sidr_req_rcvd.service_id)) 2366 goto err; 2367 2368 if (net_dev) { 2369 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); 2370 } else { 2371 if (!cma_any_addr(cma_src_addr(id_priv))) { 2372 ret = cma_translate_addr(cma_src_addr(id_priv), 2373 &id->route.addr.dev_addr); 2374 if (ret) 2375 goto err; 2376 } 2377 } 2378 2379 id_priv->state = RDMA_CM_CONNECT; 2380 return id_priv; 2381 err: 2382 rdma_destroy_id(id); 2383 return NULL; 2384 } 2385 2386 static void cma_set_req_event_data(struct rdma_cm_event *event, 2387 const struct ib_cm_req_event_param *req_data, 2388 void *private_data, int offset) 2389 { 2390 event->param.conn.private_data = private_data + offset; 2391 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 2392 event->param.conn.responder_resources = req_data->responder_resources; 2393 event->param.conn.initiator_depth = req_data->initiator_depth; 2394 event->param.conn.flow_control = req_data->flow_control; 2395 event->param.conn.retry_count = req_data->retry_count; 2396 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 2397 event->param.conn.srq = req_data->srq; 2398 event->param.conn.qp_num = req_data->remote_qpn; 2399 2400 event->ece.vendor_id = req_data->ece.vendor_id; 2401 event->ece.attr_mod = req_data->ece.attr_mod; 2402 } 2403 2404 static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, 2405 const struct ib_cm_event *ib_event) 2406 { 2407 return (((ib_event->event == IB_CM_REQ_RECEIVED) && 2408 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 2409 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 2410 
(id->qp_type == IB_QPT_UD)) || 2411 (!id->qp_type)); 2412 } 2413 2414 static int cma_ib_req_handler(struct ib_cm_id *cm_id, 2415 const struct ib_cm_event *ib_event) 2416 { 2417 struct rdma_id_private *listen_id, *conn_id = NULL; 2418 struct rdma_cm_event event = {}; 2419 struct cma_req_info req = {}; 2420 struct net_device *net_dev; 2421 u8 offset; 2422 int ret; 2423 2424 listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); 2425 if (IS_ERR(listen_id)) 2426 return PTR_ERR(listen_id); 2427 2428 trace_cm_req_handler(listen_id, ib_event->event); 2429 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { 2430 ret = -EINVAL; 2431 goto net_dev_put; 2432 } 2433 2434 mutex_lock(&listen_id->handler_mutex); 2435 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { 2436 ret = -ECONNABORTED; 2437 goto err_unlock; 2438 } 2439 2440 offset = cma_user_data_offset(listen_id); 2441 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2442 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { 2443 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); 2444 event.param.ud.private_data = ib_event->private_data + offset; 2445 event.param.ud.private_data_len = 2446 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 2447 } else { 2448 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); 2449 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 2450 ib_event->private_data, offset); 2451 } 2452 if (!conn_id) { 2453 ret = -ENOMEM; 2454 goto err_unlock; 2455 } 2456 2457 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2458 ret = cma_ib_acquire_dev(conn_id, listen_id, &req); 2459 if (ret) { 2460 destroy_id_handler_unlock(conn_id); 2461 goto err_unlock; 2462 } 2463 2464 conn_id->cm_id.ib = cm_id; 2465 cm_id->context = conn_id; 2466 cm_id->cm_handler = cma_ib_handler; 2467 2468 ret = cma_cm_event_handler(conn_id, &event); 2469 if (ret) { 2470 /* Destroy the CM ID by returning a non-zero value. 
*/ 2471 conn_id->cm_id.ib = NULL; 2472 mutex_unlock(&listen_id->handler_mutex); 2473 destroy_id_handler_unlock(conn_id); 2474 goto net_dev_put; 2475 } 2476 2477 if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && 2478 conn_id->id.qp_type != IB_QPT_UD) { 2479 trace_cm_send_mra(cm_id->context); 2480 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2481 } 2482 mutex_unlock(&conn_id->handler_mutex); 2483 2484 err_unlock: 2485 mutex_unlock(&listen_id->handler_mutex); 2486 2487 net_dev_put: 2488 dev_put(net_dev); 2489 2490 return ret; 2491 } 2492 2493 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) 2494 { 2495 if (addr->sa_family == AF_IB) 2496 return ((struct sockaddr_ib *) addr)->sib_sid; 2497 2498 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); 2499 } 2500 EXPORT_SYMBOL(rdma_get_service_id); 2501 2502 void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, 2503 union ib_gid *dgid) 2504 { 2505 struct rdma_addr *addr = &cm_id->route.addr; 2506 2507 if (!cm_id->device) { 2508 if (sgid) 2509 memset(sgid, 0, sizeof(*sgid)); 2510 if (dgid) 2511 memset(dgid, 0, sizeof(*dgid)); 2512 return; 2513 } 2514 2515 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { 2516 if (sgid) 2517 rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); 2518 if (dgid) 2519 rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); 2520 } else { 2521 if (sgid) 2522 rdma_addr_get_sgid(&addr->dev_addr, sgid); 2523 if (dgid) 2524 rdma_addr_get_dgid(&addr->dev_addr, dgid); 2525 } 2526 } 2527 EXPORT_SYMBOL(rdma_read_gids); 2528 2529 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 2530 { 2531 struct rdma_id_private *id_priv = iw_id->context; 2532 struct rdma_cm_event event = {}; 2533 int ret = 0; 2534 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2535 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2536 2537 mutex_lock(&id_priv->handler_mutex); 2538 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 2539 goto out; 2540 2541 switch (iw_event->event) { 2542 case IW_CM_EVENT_CLOSE: 2543 event.event = RDMA_CM_EVENT_DISCONNECTED; 2544 break; 2545 case IW_CM_EVENT_CONNECT_REPLY: 2546 memcpy(cma_src_addr(id_priv), laddr, 2547 rdma_addr_size(laddr)); 2548 memcpy(cma_dst_addr(id_priv), raddr, 2549 rdma_addr_size(raddr)); 2550 switch (iw_event->status) { 2551 case 0: 2552 event.event = RDMA_CM_EVENT_ESTABLISHED; 2553 event.param.conn.initiator_depth = iw_event->ird; 2554 event.param.conn.responder_resources = iw_event->ord; 2555 break; 2556 case -ECONNRESET: 2557 case -ECONNREFUSED: 2558 event.event = RDMA_CM_EVENT_REJECTED; 2559 break; 2560 case -ETIMEDOUT: 2561 event.event = RDMA_CM_EVENT_UNREACHABLE; 2562 break; 2563 default: 2564 event.event = RDMA_CM_EVENT_CONNECT_ERROR; 2565 break; 2566 } 2567 break; 2568 case IW_CM_EVENT_ESTABLISHED: 2569 event.event = RDMA_CM_EVENT_ESTABLISHED; 2570 event.param.conn.initiator_depth = iw_event->ird; 2571 event.param.conn.responder_resources = iw_event->ord; 2572 break; 2573 default: 2574 goto out; 2575 } 2576 2577 event.status = iw_event->status; 2578 event.param.conn.private_data = iw_event->private_data; 2579 event.param.conn.private_data_len = iw_event->private_data_len; 2580 ret = cma_cm_event_handler(id_priv, &event); 2581 if (ret) { 2582 /* Destroy the CM ID by returning a non-zero value. 
*/ 2583 id_priv->cm_id.iw = NULL; 2584 destroy_id_handler_unlock(id_priv); 2585 return ret; 2586 } 2587 2588 out: 2589 mutex_unlock(&id_priv->handler_mutex); 2590 return ret; 2591 } 2592 2593 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2594 struct iw_cm_event *iw_event) 2595 { 2596 struct rdma_id_private *listen_id, *conn_id; 2597 struct rdma_cm_event event = {}; 2598 int ret = -ECONNABORTED; 2599 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; 2600 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; 2601 2602 event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 2603 event.param.conn.private_data = iw_event->private_data; 2604 event.param.conn.private_data_len = iw_event->private_data_len; 2605 event.param.conn.initiator_depth = iw_event->ird; 2606 event.param.conn.responder_resources = iw_event->ord; 2607 2608 listen_id = cm_id->context; 2609 2610 mutex_lock(&listen_id->handler_mutex); 2611 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) 2612 goto out; 2613 2614 /* Create a new RDMA id for the new IW CM ID */ 2615 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2616 listen_id->id.event_handler, 2617 listen_id->id.context, RDMA_PS_TCP, 2618 IB_QPT_RC, listen_id); 2619 if (IS_ERR(conn_id)) { 2620 ret = -ENOMEM; 2621 goto out; 2622 } 2623 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2624 conn_id->state = RDMA_CM_CONNECT; 2625 2626 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); 2627 if (ret) { 2628 mutex_unlock(&listen_id->handler_mutex); 2629 destroy_id_handler_unlock(conn_id); 2630 return ret; 2631 } 2632 2633 ret = cma_iw_acquire_dev(conn_id, listen_id); 2634 if (ret) { 2635 mutex_unlock(&listen_id->handler_mutex); 2636 destroy_id_handler_unlock(conn_id); 2637 return ret; 2638 } 2639 2640 conn_id->cm_id.iw = cm_id; 2641 cm_id->context = conn_id; 2642 cm_id->cm_handler = cma_iw_handler; 2643 2644 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); 2645 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); 2646 2647 ret = cma_cm_event_handler(conn_id, &event); 2648 if (ret) { 2649 /* User wants to destroy the CM ID */ 2650 conn_id->cm_id.iw = NULL; 2651 mutex_unlock(&listen_id->handler_mutex); 2652 destroy_id_handler_unlock(conn_id); 2653 return ret; 2654 } 2655 2656 mutex_unlock(&conn_id->handler_mutex); 2657 2658 out: 2659 mutex_unlock(&listen_id->handler_mutex); 2660 return ret; 2661 } 2662 2663 static int cma_ib_listen(struct rdma_id_private *id_priv) 2664 { 2665 struct sockaddr *addr; 2666 struct ib_cm_id *id; 2667 __be64 svc_id; 2668 2669 addr = cma_src_addr(id_priv); 2670 svc_id = rdma_get_service_id(&id_priv->id, addr); 2671 id = ib_cm_insert_listen(id_priv->id.device, 2672 cma_ib_req_handler, svc_id); 2673 if (IS_ERR(id)) 2674 return PTR_ERR(id); 2675 id_priv->cm_id.ib = id; 2676 2677 return 0; 2678 } 2679 2680 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) 2681 { 2682 int ret; 2683 struct iw_cm_id *id; 2684 2685 id = iw_create_cm_id(id_priv->id.device, 2686 iw_conn_req_handler, 2687 id_priv); 2688 if (IS_ERR(id)) 2689 return PTR_ERR(id); 2690 2691 mutex_lock(&id_priv->qp_mutex); 2692 id->tos = id_priv->tos; 2693 id->tos_set = id_priv->tos_set; 2694 mutex_unlock(&id_priv->qp_mutex); 2695 id->afonly = id_priv->afonly; 2696 id_priv->cm_id.iw = id; 2697 2698 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), 2699 rdma_addr_size(cma_src_addr(id_priv))); 2700 2701 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); 2702 2703 if (ret) { 2704 
iw_destroy_cm_id(id_priv->cm_id.iw); 2705 id_priv->cm_id.iw = NULL; 2706 } 2707 2708 return ret; 2709 } 2710 2711 static int cma_listen_handler(struct rdma_cm_id *id, 2712 struct rdma_cm_event *event) 2713 { 2714 struct rdma_id_private *id_priv = id->context; 2715 2716 /* Listening IDs are always destroyed on removal */ 2717 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) 2718 return -1; 2719 2720 id->context = id_priv->id.context; 2721 id->event_handler = id_priv->id.event_handler; 2722 trace_cm_event_handler(id_priv, event); 2723 return id_priv->id.event_handler(id, event); 2724 } 2725 2726 static int cma_listen_on_dev(struct rdma_id_private *id_priv, 2727 struct cma_device *cma_dev, 2728 struct rdma_id_private **to_destroy) 2729 { 2730 struct rdma_id_private *dev_id_priv; 2731 struct net *net = id_priv->id.route.addr.dev_addr.net; 2732 int ret; 2733 2734 lockdep_assert_held(&lock); 2735 2736 *to_destroy = NULL; 2737 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2738 return 0; 2739 2740 dev_id_priv = 2741 __rdma_create_id(net, cma_listen_handler, id_priv, 2742 id_priv->id.ps, id_priv->id.qp_type, id_priv); 2743 if (IS_ERR(dev_id_priv)) 2744 return PTR_ERR(dev_id_priv); 2745 2746 dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2747 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), 2748 rdma_addr_size(cma_src_addr(id_priv))); 2749 2750 _cma_attach_to_dev(dev_id_priv, cma_dev); 2751 rdma_restrack_add(&dev_id_priv->res); 2752 cma_id_get(id_priv); 2753 dev_id_priv->internal_id = 1; 2754 dev_id_priv->afonly = id_priv->afonly; 2755 mutex_lock(&id_priv->qp_mutex); 2756 dev_id_priv->tos_set = id_priv->tos_set; 2757 dev_id_priv->tos = id_priv->tos; 2758 mutex_unlock(&id_priv->qp_mutex); 2759 2760 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); 2761 if (ret) 2762 goto err_listen; 2763 list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); 2764 return 0; 2765 err_listen: 2766 /* Caller must destroy this after releasing lock */ 2767 *to_destroy = dev_id_priv; 2768 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); 2769 return ret; 2770 } 2771 2772 static int cma_listen_on_all(struct rdma_id_private *id_priv) 2773 { 2774 struct rdma_id_private *to_destroy; 2775 struct cma_device *cma_dev; 2776 int ret; 2777 2778 mutex_lock(&lock); 2779 list_add_tail(&id_priv->listen_any_item, &listen_any_list); 2780 list_for_each_entry(cma_dev, &dev_list, list) { 2781 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 2782 if (ret) { 2783 /* Prevent racing with cma_process_remove() */ 2784 if (to_destroy) 2785 list_del_init(&to_destroy->device_item); 2786 goto err_listen; 2787 } 2788 } 2789 mutex_unlock(&lock); 2790 return 0; 2791 2792 err_listen: 2793 _cma_cancel_listens(id_priv); 2794 mutex_unlock(&lock); 2795 if (to_destroy) 2796 rdma_destroy_id(&to_destroy->id); 2797 return ret; 2798 } 2799 2800 void rdma_set_service_type(struct rdma_cm_id *id, int tos) 2801 { 2802 struct rdma_id_private *id_priv; 2803 2804 id_priv = container_of(id, struct rdma_id_private, id); 2805 mutex_lock(&id_priv->qp_mutex); 2806 id_priv->tos = (u8) tos; 2807 id_priv->tos_set = true; 2808 mutex_unlock(&id_priv->qp_mutex); 2809 } 2810 EXPORT_SYMBOL(rdma_set_service_type); 2811 2812 /** 2813 * rdma_set_ack_timeout() - Set the ack timeout of QP associated 2814 * with a connection identifier. 2815 * @id: Communication identifier to associated with service type. 2816 * @timeout: Ack timeout to set a QP, expressed as 4.096 * 2^(timeout) usec. 
2817 * 2818 * This function should be called before rdma_connect() on active side, 2819 * and on passive side before rdma_accept(). It is applicable to primary 2820 * path only. The timeout will affect the local side of the QP, it is not 2821 * negotiated with remote side and zero disables the timer. In case it is 2822 * set before rdma_resolve_route, the value will also be used to determine 2823 * PacketLifeTime for RoCE. 2824 * 2825 * Return: 0 for success 2826 */ 2827 int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) 2828 { 2829 struct rdma_id_private *id_priv; 2830 2831 if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) 2832 return -EINVAL; 2833 2834 id_priv = container_of(id, struct rdma_id_private, id); 2835 mutex_lock(&id_priv->qp_mutex); 2836 id_priv->timeout = timeout; 2837 id_priv->timeout_set = true; 2838 mutex_unlock(&id_priv->qp_mutex); 2839 2840 return 0; 2841 } 2842 EXPORT_SYMBOL(rdma_set_ack_timeout); 2843 2844 /** 2845 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the 2846 * QP associated with a connection identifier. 2847 * @id: Communication identifier associated with the QP. 2848 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK 2849 * Timer Field" in the IBTA specification. 2850 * 2851 * This function should be called before rdma_connect() on active 2852 * side, and on passive side before rdma_accept(). The timer value 2853 * will be associated with the local QP. When it receives a send it is 2854 * not ready to handle, typically because the receive queue is empty, an RNR 2855 * Retry NAK is returned to the requester with the min_rnr_timer 2856 * encoded. The requester will then wait at least the time specified 2857 * in the NAK before retrying. The default is zero, which translates 2858 * to a minimum RNR Timer value of 655 ms.
2859 * 2860 * Return: 0 for success 2861 */ 2862 int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) 2863 { 2864 struct rdma_id_private *id_priv; 2865 2866 /* It is a five-bit value */ 2867 if (min_rnr_timer & 0xe0) 2868 return -EINVAL; 2869 2870 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) 2871 return -EINVAL; 2872 2873 id_priv = container_of(id, struct rdma_id_private, id); 2874 mutex_lock(&id_priv->qp_mutex); 2875 id_priv->min_rnr_timer = min_rnr_timer; 2876 id_priv->min_rnr_timer_set = true; 2877 mutex_unlock(&id_priv->qp_mutex); 2878 2879 return 0; 2880 } 2881 EXPORT_SYMBOL(rdma_set_min_rnr_timer); 2882 2883 static int route_set_path_rec_inbound(struct cma_work *work, 2884 struct sa_path_rec *path_rec) 2885 { 2886 struct rdma_route *route = &work->id->id.route; 2887 2888 if (!route->path_rec_inbound) { 2889 route->path_rec_inbound = 2890 kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL); 2891 if (!route->path_rec_inbound) 2892 return -ENOMEM; 2893 } 2894 2895 *route->path_rec_inbound = *path_rec; 2896 return 0; 2897 } 2898 2899 static int route_set_path_rec_outbound(struct cma_work *work, 2900 struct sa_path_rec *path_rec) 2901 { 2902 struct rdma_route *route = &work->id->id.route; 2903 2904 if (!route->path_rec_outbound) { 2905 route->path_rec_outbound = 2906 kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL); 2907 if (!route->path_rec_outbound) 2908 return -ENOMEM; 2909 } 2910 2911 *route->path_rec_outbound = *path_rec; 2912 return 0; 2913 } 2914 2915 static void cma_query_handler(int status, struct sa_path_rec *path_rec, 2916 unsigned int num_prs, void *context) 2917 { 2918 struct cma_work *work = context; 2919 struct rdma_route *route; 2920 int i; 2921 2922 route = &work->id->id.route; 2923 2924 if (status) 2925 goto fail; 2926 2927 for (i = 0; i < num_prs; i++) { 2928 if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) 2929 *route->path_rec = path_rec[i]; 2930 else if (path_rec[i].flags & IB_PATH_INBOUND) 2931 status = route_set_path_rec_inbound(work, &path_rec[i]); 2932 else if (path_rec[i].flags & IB_PATH_OUTBOUND) 2933 status = route_set_path_rec_outbound(work, 2934 &path_rec[i]); 2935 else 2936 status = -EINVAL; 2937 2938 if (status) 2939 goto fail; 2940 } 2941 2942 route->num_pri_alt_paths = 1; 2943 queue_work(cma_wq, &work->work); 2944 return; 2945 2946 fail: 2947 work->old_state = RDMA_CM_ROUTE_QUERY; 2948 work->new_state = RDMA_CM_ADDR_RESOLVED; 2949 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 2950 work->event.status = status; 2951 pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. 
status %d\n", 2952 status); 2953 queue_work(cma_wq, &work->work); 2954 } 2955 2956 static int cma_query_ib_route(struct rdma_id_private *id_priv, 2957 unsigned long timeout_ms, struct cma_work *work) 2958 { 2959 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 2960 struct sa_path_rec path_rec; 2961 ib_sa_comp_mask comp_mask; 2962 struct sockaddr_in6 *sin6; 2963 struct sockaddr_ib *sib; 2964 2965 memset(&path_rec, 0, sizeof path_rec); 2966 2967 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) 2968 path_rec.rec_type = SA_PATH_REC_TYPE_OPA; 2969 else 2970 path_rec.rec_type = SA_PATH_REC_TYPE_IB; 2971 rdma_addr_get_sgid(dev_addr, &path_rec.sgid); 2972 rdma_addr_get_dgid(dev_addr, &path_rec.dgid); 2973 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2974 path_rec.numb_path = 1; 2975 path_rec.reversible = 1; 2976 path_rec.service_id = rdma_get_service_id(&id_priv->id, 2977 cma_dst_addr(id_priv)); 2978 2979 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2980 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | 2981 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; 2982 2983 switch (cma_family(id_priv)) { 2984 case AF_INET: 2985 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); 2986 comp_mask |= IB_SA_PATH_REC_QOS_CLASS; 2987 break; 2988 case AF_INET6: 2989 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 2990 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); 2991 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2992 break; 2993 case AF_IB: 2994 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 2995 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); 2996 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; 2997 break; 2998 } 2999 3000 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 3001 id_priv->id.port_num, &path_rec, 3002 comp_mask, timeout_ms, 3003 GFP_KERNEL, cma_query_handler, 3004 work, &id_priv->query); 3005 3006 return (id_priv->query_id < 0) ? 
id_priv->query_id : 0; 3007 } 3008 3009 static void cma_iboe_join_work_handler(struct work_struct *work) 3010 { 3011 struct cma_multicast *mc = 3012 container_of(work, struct cma_multicast, iboe_join.work); 3013 struct rdma_cm_event *event = &mc->iboe_join.event; 3014 struct rdma_id_private *id_priv = mc->id_priv; 3015 int ret; 3016 3017 mutex_lock(&id_priv->handler_mutex); 3018 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 3019 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 3020 goto out_unlock; 3021 3022 ret = cma_cm_event_handler(id_priv, event); 3023 WARN_ON(ret); 3024 3025 out_unlock: 3026 mutex_unlock(&id_priv->handler_mutex); 3027 if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) 3028 rdma_destroy_ah_attr(&event->param.ud.ah_attr); 3029 } 3030 3031 static void cma_work_handler(struct work_struct *_work) 3032 { 3033 struct cma_work *work = container_of(_work, struct cma_work, work); 3034 struct rdma_id_private *id_priv = work->id; 3035 3036 mutex_lock(&id_priv->handler_mutex); 3037 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 3038 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 3039 goto out_unlock; 3040 if (work->old_state != 0 || work->new_state != 0) { 3041 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 3042 goto out_unlock; 3043 } 3044 3045 if (cma_cm_event_handler(id_priv, &work->event)) { 3046 cma_id_put(id_priv); 3047 destroy_id_handler_unlock(id_priv); 3048 goto out_free; 3049 } 3050 3051 out_unlock: 3052 mutex_unlock(&id_priv->handler_mutex); 3053 cma_id_put(id_priv); 3054 out_free: 3055 if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) 3056 rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); 3057 kfree(work); 3058 } 3059 3060 static void cma_init_resolve_route_work(struct cma_work *work, 3061 struct rdma_id_private *id_priv) 3062 { 3063 work->id = id_priv; 3064 INIT_WORK(&work->work, cma_work_handler); 3065 work->old_state = RDMA_CM_ROUTE_QUERY; 3066 work->new_state = RDMA_CM_ROUTE_RESOLVED; 3067 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 3068 } 3069 3070 static void enqueue_resolve_addr_work(struct cma_work *work, 3071 struct rdma_id_private *id_priv) 3072 { 3073 /* Balances with cma_id_put() in cma_work_handler */ 3074 cma_id_get(id_priv); 3075 3076 work->id = id_priv; 3077 INIT_WORK(&work->work, cma_work_handler); 3078 work->old_state = RDMA_CM_ADDR_QUERY; 3079 work->new_state = RDMA_CM_ADDR_RESOLVED; 3080 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3081 3082 queue_work(cma_wq, &work->work); 3083 } 3084 3085 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, 3086 unsigned long timeout_ms) 3087 { 3088 struct rdma_route *route = &id_priv->id.route; 3089 struct cma_work *work; 3090 int ret; 3091 3092 work = kzalloc(sizeof *work, GFP_KERNEL); 3093 if (!work) 3094 return -ENOMEM; 3095 3096 cma_init_resolve_route_work(work, id_priv); 3097 3098 if (!route->path_rec) 3099 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); 3100 if (!route->path_rec) { 3101 ret = -ENOMEM; 3102 goto err1; 3103 } 3104 3105 ret = cma_query_ib_route(id_priv, timeout_ms, work); 3106 if (ret) 3107 goto err2; 3108 3109 return 0; 3110 err2: 3111 kfree(route->path_rec); 3112 route->path_rec = NULL; 3113 err1: 3114 kfree(work); 3115 return ret; 3116 } 3117 3118 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, 3119 unsigned long supported_gids, 3120 enum ib_gid_type default_gid) 3121 { 3122 if ((network_type == RDMA_NETWORK_IPV4 || 3123 network_type == RDMA_NETWORK_IPV6) && 3124 
test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) 3125 return IB_GID_TYPE_ROCE_UDP_ENCAP; 3126 3127 return default_gid; 3128 } 3129 3130 /* 3131 * cma_iboe_set_path_rec_l2_fields() is helper function which sets 3132 * path record type based on GID type. 3133 * It also sets up other L2 fields which includes destination mac address 3134 * netdev ifindex, of the path record. 3135 * It returns the netdev of the bound interface for this path record entry. 3136 */ 3137 static struct net_device * 3138 cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) 3139 { 3140 struct rdma_route *route = &id_priv->id.route; 3141 enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; 3142 struct rdma_addr *addr = &route->addr; 3143 unsigned long supported_gids; 3144 struct net_device *ndev; 3145 3146 if (!addr->dev_addr.bound_dev_if) 3147 return NULL; 3148 3149 ndev = dev_get_by_index(addr->dev_addr.net, 3150 addr->dev_addr.bound_dev_if); 3151 if (!ndev) 3152 return NULL; 3153 3154 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 3155 id_priv->id.port_num); 3156 gid_type = cma_route_gid_type(addr->dev_addr.network, 3157 supported_gids, 3158 id_priv->gid_type); 3159 /* Use the hint from IP Stack to select GID Type */ 3160 if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) 3161 gid_type = ib_network_to_gid_type(addr->dev_addr.network); 3162 route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); 3163 3164 route->path_rec->roce.route_resolved = true; 3165 sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); 3166 return ndev; 3167 } 3168 3169 int rdma_set_ib_path(struct rdma_cm_id *id, 3170 struct sa_path_rec *path_rec) 3171 { 3172 struct rdma_id_private *id_priv; 3173 struct net_device *ndev; 3174 int ret; 3175 3176 id_priv = container_of(id, struct rdma_id_private, id); 3177 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 3178 RDMA_CM_ROUTE_RESOLVED)) 3179 return -EINVAL; 3180 3181 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), 3182 GFP_KERNEL); 3183 if (!id->route.path_rec) { 3184 ret = -ENOMEM; 3185 goto err; 3186 } 3187 3188 if (rdma_protocol_roce(id->device, id->port_num)) { 3189 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); 3190 if (!ndev) { 3191 ret = -ENODEV; 3192 goto err_free; 3193 } 3194 dev_put(ndev); 3195 } 3196 3197 id->route.num_pri_alt_paths = 1; 3198 return 0; 3199 3200 err_free: 3201 kfree(id->route.path_rec); 3202 id->route.path_rec = NULL; 3203 err: 3204 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 3205 return ret; 3206 } 3207 EXPORT_SYMBOL(rdma_set_ib_path); 3208 3209 static int cma_resolve_iw_route(struct rdma_id_private *id_priv) 3210 { 3211 struct cma_work *work; 3212 3213 work = kzalloc(sizeof *work, GFP_KERNEL); 3214 if (!work) 3215 return -ENOMEM; 3216 3217 cma_init_resolve_route_work(work, id_priv); 3218 queue_work(cma_wq, &work->work); 3219 return 0; 3220 } 3221 3222 static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) 3223 { 3224 struct net_device *dev; 3225 3226 dev = vlan_dev_real_dev(vlan_ndev); 3227 if (dev->num_tc) 3228 return netdev_get_prio_tc_map(dev, prio); 3229 3230 return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & 3231 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 3232 } 3233 3234 struct iboe_prio_tc_map { 3235 int input_prio; 3236 int output_tc; 3237 bool found; 3238 }; 3239 3240 static int get_lower_vlan_dev_tc(struct net_device *dev, 3241 struct netdev_nested_priv *priv) 3242 { 3243 struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; 3244 3245 
if (is_vlan_dev(dev)) 3246 map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); 3247 else if (dev->num_tc) 3248 map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); 3249 else 3250 map->output_tc = 0; 3251 /* We are interested only in first level VLAN device, so always 3252 * return 1 to stop iterating over next level devices. 3253 */ 3254 map->found = true; 3255 return 1; 3256 } 3257 3258 static int iboe_tos_to_sl(struct net_device *ndev, int tos) 3259 { 3260 struct iboe_prio_tc_map prio_tc_map = {}; 3261 int prio = rt_tos2priority(tos); 3262 struct netdev_nested_priv priv; 3263 3264 /* If VLAN device, get it directly from the VLAN netdev */ 3265 if (is_vlan_dev(ndev)) 3266 return get_vlan_ndev_tc(ndev, prio); 3267 3268 prio_tc_map.input_prio = prio; 3269 priv.data = (void *)&prio_tc_map; 3270 rcu_read_lock(); 3271 netdev_walk_all_lower_dev_rcu(ndev, 3272 get_lower_vlan_dev_tc, 3273 &priv); 3274 rcu_read_unlock(); 3275 /* If map is found from lower device, use it; Otherwise 3276 * continue with the current netdevice to get priority to tc map. 3277 */ 3278 if (prio_tc_map.found) 3279 return prio_tc_map.output_tc; 3280 else if (ndev->num_tc) 3281 return netdev_get_prio_tc_map(ndev, prio); 3282 else 3283 return 0; 3284 } 3285 3286 static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) 3287 { 3288 struct sockaddr_in6 *addr6; 3289 u16 dport, sport; 3290 u32 hash, fl; 3291 3292 addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); 3293 fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; 3294 if ((cma_family(id_priv) != AF_INET6) || !fl) { 3295 dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); 3296 sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); 3297 hash = (u32)sport * 31 + dport; 3298 fl = hash & IB_GRH_FLOWLABEL_MASK; 3299 } 3300 3301 return cpu_to_be32(fl); 3302 } 3303 3304 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 3305 { 3306 struct rdma_route *route = &id_priv->id.route; 3307 struct rdma_addr *addr = &route->addr; 3308 struct cma_work *work; 3309 int ret; 3310 struct net_device *ndev; 3311 3312 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - 3313 rdma_start_port(id_priv->cma_dev->device)]; 3314 u8 tos; 3315 3316 mutex_lock(&id_priv->qp_mutex); 3317 tos = id_priv->tos_set ? 
id_priv->tos : default_roce_tos; 3318 mutex_unlock(&id_priv->qp_mutex); 3319 3320 work = kzalloc(sizeof *work, GFP_KERNEL); 3321 if (!work) 3322 return -ENOMEM; 3323 3324 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); 3325 if (!route->path_rec) { 3326 ret = -ENOMEM; 3327 goto err1; 3328 } 3329 3330 route->num_pri_alt_paths = 1; 3331 3332 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); 3333 if (!ndev) { 3334 ret = -ENODEV; 3335 goto err2; 3336 } 3337 3338 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 3339 &route->path_rec->sgid); 3340 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, 3341 &route->path_rec->dgid); 3342 3343 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) 3344 /* TODO: get the hoplimit from the inet/inet6 device */ 3345 route->path_rec->hop_limit = addr->dev_addr.hoplimit; 3346 else 3347 route->path_rec->hop_limit = 1; 3348 route->path_rec->reversible = 1; 3349 route->path_rec->pkey = cpu_to_be16(0xffff); 3350 route->path_rec->mtu_selector = IB_SA_EQ; 3351 route->path_rec->sl = iboe_tos_to_sl(ndev, tos); 3352 route->path_rec->traffic_class = tos; 3353 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 3354 route->path_rec->rate_selector = IB_SA_EQ; 3355 route->path_rec->rate = IB_RATE_PORT_CURRENT; 3356 dev_put(ndev); 3357 route->path_rec->packet_life_time_selector = IB_SA_EQ; 3358 /* In case ACK timeout is set, use this value to calculate 3359 * PacketLifeTime. As per IBTA 12.7.34, 3360 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). 3361 * Assuming a negligible local ACK delay, we can use 3362 * PacketLifeTime = local ACK timeout/2 3363 * as a reasonable approximation for RoCE networks. 3364 */ 3365 mutex_lock(&id_priv->qp_mutex); 3366 if (id_priv->timeout_set && id_priv->timeout) 3367 route->path_rec->packet_life_time = id_priv->timeout - 1; 3368 else 3369 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; 3370 mutex_unlock(&id_priv->qp_mutex); 3371 3372 if (!route->path_rec->mtu) { 3373 ret = -EINVAL; 3374 goto err2; 3375 } 3376 3377 if (rdma_protocol_roce_udp_encap(id_priv->id.device, 3378 id_priv->id.port_num)) 3379 route->path_rec->flow_label = 3380 cma_get_roce_udp_flow_label(id_priv); 3381 3382 cma_init_resolve_route_work(work, id_priv); 3383 queue_work(cma_wq, &work->work); 3384 3385 return 0; 3386 3387 err2: 3388 kfree(route->path_rec); 3389 route->path_rec = NULL; 3390 route->num_pri_alt_paths = 0; 3391 err1: 3392 kfree(work); 3393 return ret; 3394 } 3395 3396 int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) 3397 { 3398 struct rdma_id_private *id_priv; 3399 int ret; 3400 3401 if (!timeout_ms) 3402 return -EINVAL; 3403 3404 id_priv = container_of(id, struct rdma_id_private, id); 3405 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 3406 return -EINVAL; 3407 3408 cma_id_get(id_priv); 3409 if (rdma_cap_ib_sa(id->device, id->port_num)) 3410 ret = cma_resolve_ib_route(id_priv, timeout_ms); 3411 else if (rdma_protocol_roce(id->device, id->port_num)) { 3412 ret = cma_resolve_iboe_route(id_priv); 3413 if (!ret) 3414 cma_add_id_to_tree(id_priv); 3415 } 3416 else if (rdma_protocol_iwarp(id->device, id->port_num)) 3417 ret = cma_resolve_iw_route(id_priv); 3418 else 3419 ret = -ENOSYS; 3420 3421 if (ret) 3422 goto err; 3423 3424 return 0; 3425 err: 3426 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 3427 cma_id_put(id_priv); 3428 return ret; 3429 } 3430 EXPORT_SYMBOL(rdma_resolve_route); 3431 3432 
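/*
 * Example (illustrative sketch only, not part of this driver): a ULP that
 * has already received RDMA_CM_EVENT_ADDR_RESOLVED for an RC connection
 * (id->qp_type == IB_QPT_RC) would typically tune the per-id parameters
 * exported above and then start route resolution.  The helper name and the
 * tos/timeout values below are hypothetical; only the rdma_* calls are APIs
 * defined in this file:
 *
 *	static int ulp_start_route_resolution(struct rdma_cm_id *cm_id)
 *	{
 *		int ret;
 *
 *		// Type-of-service hint used when building the path record
 *		// (QoS class / traffic class / SL selection).
 *		rdma_set_service_type(cm_id, 0x10);
 *
 *		// Local ACK timeout of 4.096us * 2^14 (~67ms); when set before
 *		// rdma_resolve_route() it is also used to derive the RoCE
 *		// PacketLifeTime in cma_resolve_iboe_route().
 *		ret = rdma_set_ack_timeout(cm_id, 14);
 *		if (ret)
 *			return ret;
 *
 *		// Minimum RNR NAK timer, encoded per IBTA Table 45.
 *		ret = rdma_set_min_rnr_timer(cm_id, 12);
 *		if (ret)
 *			return ret;
 *
 *		// A non-zero timeout is required for the path query.
 *		return rdma_resolve_route(cm_id, 2000);
 *	}
 *
 * rdma_resolve_route() completes asynchronously through the id's event
 * handler with RDMA_CM_EVENT_ROUTE_RESOLVED or RDMA_CM_EVENT_ROUTE_ERROR.
 */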
static void cma_set_loopback(struct sockaddr *addr) 3433 { 3434 switch (addr->sa_family) { 3435 case AF_INET: 3436 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 3437 break; 3438 case AF_INET6: 3439 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 3440 0, 0, 0, htonl(1)); 3441 break; 3442 default: 3443 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 3444 0, 0, 0, htonl(1)); 3445 break; 3446 } 3447 } 3448 3449 static int cma_bind_loopback(struct rdma_id_private *id_priv) 3450 { 3451 struct cma_device *cma_dev, *cur_dev; 3452 union ib_gid gid; 3453 enum ib_port_state port_state; 3454 unsigned int p; 3455 u16 pkey; 3456 int ret; 3457 3458 cma_dev = NULL; 3459 mutex_lock(&lock); 3460 list_for_each_entry(cur_dev, &dev_list, list) { 3461 if (cma_family(id_priv) == AF_IB && 3462 !rdma_cap_ib_cm(cur_dev->device, 1)) 3463 continue; 3464 3465 if (!cma_dev) 3466 cma_dev = cur_dev; 3467 3468 rdma_for_each_port (cur_dev->device, p) { 3469 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && 3470 port_state == IB_PORT_ACTIVE) { 3471 cma_dev = cur_dev; 3472 goto port_found; 3473 } 3474 } 3475 } 3476 3477 if (!cma_dev) { 3478 ret = -ENODEV; 3479 goto out; 3480 } 3481 3482 p = 1; 3483 3484 port_found: 3485 ret = rdma_query_gid(cma_dev->device, p, 0, &gid); 3486 if (ret) 3487 goto out; 3488 3489 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); 3490 if (ret) 3491 goto out; 3492 3493 id_priv->id.route.addr.dev_addr.dev_type = 3494 (rdma_protocol_ib(cma_dev->device, p)) ? 3495 ARPHRD_INFINIBAND : ARPHRD_ETHER; 3496 3497 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3498 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); 3499 id_priv->id.port_num = p; 3500 cma_attach_to_dev(id_priv, cma_dev); 3501 rdma_restrack_add(&id_priv->res); 3502 cma_set_loopback(cma_src_addr(id_priv)); 3503 out: 3504 mutex_unlock(&lock); 3505 return ret; 3506 } 3507 3508 static void addr_handler(int status, struct sockaddr *src_addr, 3509 struct rdma_dev_addr *dev_addr, void *context) 3510 { 3511 struct rdma_id_private *id_priv = context; 3512 struct rdma_cm_event event = {}; 3513 struct sockaddr *addr; 3514 struct sockaddr_storage old_addr; 3515 3516 mutex_lock(&id_priv->handler_mutex); 3517 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 3518 RDMA_CM_ADDR_RESOLVED)) 3519 goto out; 3520 3521 /* 3522 * Store the previous src address, so that if we fail to acquire 3523 * matching rdma device, old address can be restored back, which helps 3524 * to cancel the cma listen operation correctly. 3525 */ 3526 addr = cma_src_addr(id_priv); 3527 memcpy(&old_addr, addr, rdma_addr_size(addr)); 3528 memcpy(addr, src_addr, rdma_addr_size(src_addr)); 3529 if (!status && !id_priv->cma_dev) { 3530 status = cma_acquire_dev_by_src_ip(id_priv); 3531 if (status) 3532 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", 3533 status); 3534 rdma_restrack_add(&id_priv->res); 3535 } else if (status) { 3536 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. 
status %d\n", status); 3537 } 3538 3539 if (status) { 3540 memcpy(addr, &old_addr, 3541 rdma_addr_size((struct sockaddr *)&old_addr)); 3542 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 3543 RDMA_CM_ADDR_BOUND)) 3544 goto out; 3545 event.event = RDMA_CM_EVENT_ADDR_ERROR; 3546 event.status = status; 3547 } else 3548 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3549 3550 if (cma_cm_event_handler(id_priv, &event)) { 3551 destroy_id_handler_unlock(id_priv); 3552 return; 3553 } 3554 out: 3555 mutex_unlock(&id_priv->handler_mutex); 3556 } 3557 3558 static int cma_resolve_loopback(struct rdma_id_private *id_priv) 3559 { 3560 struct cma_work *work; 3561 union ib_gid gid; 3562 int ret; 3563 3564 work = kzalloc(sizeof *work, GFP_KERNEL); 3565 if (!work) 3566 return -ENOMEM; 3567 3568 if (!id_priv->cma_dev) { 3569 ret = cma_bind_loopback(id_priv); 3570 if (ret) 3571 goto err; 3572 } 3573 3574 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); 3575 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); 3576 3577 enqueue_resolve_addr_work(work, id_priv); 3578 return 0; 3579 err: 3580 kfree(work); 3581 return ret; 3582 } 3583 3584 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) 3585 { 3586 struct cma_work *work; 3587 int ret; 3588 3589 work = kzalloc(sizeof *work, GFP_KERNEL); 3590 if (!work) 3591 return -ENOMEM; 3592 3593 if (!id_priv->cma_dev) { 3594 ret = cma_resolve_ib_dev(id_priv); 3595 if (ret) 3596 goto err; 3597 } 3598 3599 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) 3600 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); 3601 3602 enqueue_resolve_addr_work(work, id_priv); 3603 return 0; 3604 err: 3605 kfree(work); 3606 return ret; 3607 } 3608 3609 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) 3610 { 3611 struct rdma_id_private *id_priv; 3612 unsigned long flags; 3613 int ret; 3614 3615 id_priv = container_of(id, struct rdma_id_private, id); 3616 spin_lock_irqsave(&id_priv->lock, flags); 3617 if ((reuse && id_priv->state != RDMA_CM_LISTEN) || 3618 id_priv->state == RDMA_CM_IDLE) { 3619 id_priv->reuseaddr = reuse; 3620 ret = 0; 3621 } else { 3622 ret = -EINVAL; 3623 } 3624 spin_unlock_irqrestore(&id_priv->lock, flags); 3625 return ret; 3626 } 3627 EXPORT_SYMBOL(rdma_set_reuseaddr); 3628 3629 int rdma_set_afonly(struct rdma_cm_id *id, int afonly) 3630 { 3631 struct rdma_id_private *id_priv; 3632 unsigned long flags; 3633 int ret; 3634 3635 id_priv = container_of(id, struct rdma_id_private, id); 3636 spin_lock_irqsave(&id_priv->lock, flags); 3637 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { 3638 id_priv->options |= (1 << CMA_OPTION_AFONLY); 3639 id_priv->afonly = afonly; 3640 ret = 0; 3641 } else { 3642 ret = -EINVAL; 3643 } 3644 spin_unlock_irqrestore(&id_priv->lock, flags); 3645 return ret; 3646 } 3647 EXPORT_SYMBOL(rdma_set_afonly); 3648 3649 static void cma_bind_port(struct rdma_bind_list *bind_list, 3650 struct rdma_id_private *id_priv) 3651 { 3652 struct sockaddr *addr; 3653 struct sockaddr_ib *sib; 3654 u64 sid, mask; 3655 __be16 port; 3656 3657 lockdep_assert_held(&lock); 3658 3659 addr = cma_src_addr(id_priv); 3660 port = htons(bind_list->port); 3661 3662 switch (addr->sa_family) { 3663 case AF_INET: 3664 ((struct sockaddr_in *) addr)->sin_port = port; 3665 break; 3666 case AF_INET6: 3667 ((struct sockaddr_in6 *) addr)->sin6_port = port; 3668 break; 3669 case AF_IB: 3670 sib = (struct sockaddr_ib *) addr; 3671 sid = be64_to_cpu(sib->sib_sid); 3672 mask = 
be64_to_cpu(sib->sib_sid_mask); 3673 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); 3674 sib->sib_sid_mask = cpu_to_be64(~0ULL); 3675 break; 3676 } 3677 id_priv->bind_list = bind_list; 3678 hlist_add_head(&id_priv->node, &bind_list->owners); 3679 } 3680 3681 static int cma_alloc_port(enum rdma_ucm_port_space ps, 3682 struct rdma_id_private *id_priv, unsigned short snum) 3683 { 3684 struct rdma_bind_list *bind_list; 3685 int ret; 3686 3687 lockdep_assert_held(&lock); 3688 3689 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); 3690 if (!bind_list) 3691 return -ENOMEM; 3692 3693 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, 3694 snum); 3695 if (ret < 0) 3696 goto err; 3697 3698 bind_list->ps = ps; 3699 bind_list->port = snum; 3700 cma_bind_port(bind_list, id_priv); 3701 return 0; 3702 err: 3703 kfree(bind_list); 3704 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; 3705 } 3706 3707 static int cma_port_is_unique(struct rdma_bind_list *bind_list, 3708 struct rdma_id_private *id_priv) 3709 { 3710 struct rdma_id_private *cur_id; 3711 struct sockaddr *daddr = cma_dst_addr(id_priv); 3712 struct sockaddr *saddr = cma_src_addr(id_priv); 3713 __be16 dport = cma_port(daddr); 3714 3715 lockdep_assert_held(&lock); 3716 3717 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3718 struct sockaddr *cur_daddr = cma_dst_addr(cur_id); 3719 struct sockaddr *cur_saddr = cma_src_addr(cur_id); 3720 __be16 cur_dport = cma_port(cur_daddr); 3721 3722 if (id_priv == cur_id) 3723 continue; 3724 3725 /* different dest port -> unique */ 3726 if (!cma_any_port(daddr) && 3727 !cma_any_port(cur_daddr) && 3728 (dport != cur_dport)) 3729 continue; 3730 3731 /* different src address -> unique */ 3732 if (!cma_any_addr(saddr) && 3733 !cma_any_addr(cur_saddr) && 3734 cma_addr_cmp(saddr, cur_saddr)) 3735 continue; 3736 3737 /* different dst address -> unique */ 3738 if (!cma_any_addr(daddr) && 3739 !cma_any_addr(cur_daddr) && 3740 cma_addr_cmp(daddr, cur_daddr)) 3741 continue; 3742 3743 return -EADDRNOTAVAIL; 3744 } 3745 return 0; 3746 } 3747 3748 static int cma_alloc_any_port(enum rdma_ucm_port_space ps, 3749 struct rdma_id_private *id_priv) 3750 { 3751 static unsigned int last_used_port; 3752 int low, high, remaining; 3753 unsigned int rover; 3754 struct net *net = id_priv->id.route.addr.dev_addr.net; 3755 3756 lockdep_assert_held(&lock); 3757 3758 inet_get_local_port_range(net, &low, &high); 3759 remaining = (high - low) + 1; 3760 rover = get_random_u32_inclusive(low, remaining + low - 1); 3761 retry: 3762 if (last_used_port != rover) { 3763 struct rdma_bind_list *bind_list; 3764 int ret; 3765 3766 bind_list = cma_ps_find(net, ps, (unsigned short)rover); 3767 3768 if (!bind_list) { 3769 ret = cma_alloc_port(ps, id_priv, rover); 3770 } else { 3771 ret = cma_port_is_unique(bind_list, id_priv); 3772 if (!ret) 3773 cma_bind_port(bind_list, id_priv); 3774 } 3775 /* 3776 * Remember previously used port number in order to avoid 3777 * re-using same port immediately after it is closed. 3778 */ 3779 if (!ret) 3780 last_used_port = rover; 3781 if (ret != -EADDRNOTAVAIL) 3782 return ret; 3783 } 3784 if (--remaining) { 3785 rover++; 3786 if ((rover < low) || (rover > high)) 3787 rover = low; 3788 goto retry; 3789 } 3790 return -EADDRNOTAVAIL; 3791 } 3792 3793 /* 3794 * Check that the requested port is available. This is called when trying to 3795 * bind to a specific port, or when trying to listen on a bound port. 
In 3796 * the latter case, the provided id_priv may already be on the bind_list, but 3797 * we still need to check that it's okay to start listening. 3798 */ 3799 static int cma_check_port(struct rdma_bind_list *bind_list, 3800 struct rdma_id_private *id_priv, uint8_t reuseaddr) 3801 { 3802 struct rdma_id_private *cur_id; 3803 struct sockaddr *addr, *cur_addr; 3804 3805 lockdep_assert_held(&lock); 3806 3807 addr = cma_src_addr(id_priv); 3808 hlist_for_each_entry(cur_id, &bind_list->owners, node) { 3809 if (id_priv == cur_id) 3810 continue; 3811 3812 if (reuseaddr && cur_id->reuseaddr) 3813 continue; 3814 3815 cur_addr = cma_src_addr(cur_id); 3816 if (id_priv->afonly && cur_id->afonly && 3817 (addr->sa_family != cur_addr->sa_family)) 3818 continue; 3819 3820 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) 3821 return -EADDRNOTAVAIL; 3822 3823 if (!cma_addr_cmp(addr, cur_addr)) 3824 return -EADDRINUSE; 3825 } 3826 return 0; 3827 } 3828 3829 static int cma_use_port(enum rdma_ucm_port_space ps, 3830 struct rdma_id_private *id_priv) 3831 { 3832 struct rdma_bind_list *bind_list; 3833 unsigned short snum; 3834 int ret; 3835 3836 lockdep_assert_held(&lock); 3837 3838 snum = ntohs(cma_port(cma_src_addr(id_priv))); 3839 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) 3840 return -EACCES; 3841 3842 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); 3843 if (!bind_list) { 3844 ret = cma_alloc_port(ps, id_priv, snum); 3845 } else { 3846 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); 3847 if (!ret) 3848 cma_bind_port(bind_list, id_priv); 3849 } 3850 return ret; 3851 } 3852 3853 static enum rdma_ucm_port_space 3854 cma_select_inet_ps(struct rdma_id_private *id_priv) 3855 { 3856 switch (id_priv->id.ps) { 3857 case RDMA_PS_TCP: 3858 case RDMA_PS_UDP: 3859 case RDMA_PS_IPOIB: 3860 case RDMA_PS_IB: 3861 return id_priv->id.ps; 3862 default: 3863 3864 return 0; 3865 } 3866 } 3867 3868 static enum rdma_ucm_port_space 3869 cma_select_ib_ps(struct rdma_id_private *id_priv) 3870 { 3871 enum rdma_ucm_port_space ps = 0; 3872 struct sockaddr_ib *sib; 3873 u64 sid_ps, mask, sid; 3874 3875 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); 3876 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; 3877 sid = be64_to_cpu(sib->sib_sid) & mask; 3878 3879 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { 3880 sid_ps = RDMA_IB_IP_PS_IB; 3881 ps = RDMA_PS_IB; 3882 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && 3883 (sid == (RDMA_IB_IP_PS_TCP & mask))) { 3884 sid_ps = RDMA_IB_IP_PS_TCP; 3885 ps = RDMA_PS_TCP; 3886 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && 3887 (sid == (RDMA_IB_IP_PS_UDP & mask))) { 3888 sid_ps = RDMA_IB_IP_PS_UDP; 3889 ps = RDMA_PS_UDP; 3890 } 3891 3892 if (ps) { 3893 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); 3894 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | 3895 be64_to_cpu(sib->sib_sid_mask)); 3896 } 3897 return ps; 3898 } 3899 3900 static int cma_get_port(struct rdma_id_private *id_priv) 3901 { 3902 enum rdma_ucm_port_space ps; 3903 int ret; 3904 3905 if (cma_family(id_priv) != AF_IB) 3906 ps = cma_select_inet_ps(id_priv); 3907 else 3908 ps = cma_select_ib_ps(id_priv); 3909 if (!ps) 3910 return -EPROTONOSUPPORT; 3911 3912 mutex_lock(&lock); 3913 if (cma_any_port(cma_src_addr(id_priv))) 3914 ret = cma_alloc_any_port(ps, id_priv); 3915 else 3916 ret = cma_use_port(ps, id_priv); 3917 mutex_unlock(&lock); 3918 3919 
return ret; 3920 } 3921 3922 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, 3923 struct sockaddr *addr) 3924 { 3925 #if IS_ENABLED(CONFIG_IPV6) 3926 struct sockaddr_in6 *sin6; 3927 3928 if (addr->sa_family != AF_INET6) 3929 return 0; 3930 3931 sin6 = (struct sockaddr_in6 *) addr; 3932 3933 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) 3934 return 0; 3935 3936 if (!sin6->sin6_scope_id) 3937 return -EINVAL; 3938 3939 dev_addr->bound_dev_if = sin6->sin6_scope_id; 3940 #endif 3941 return 0; 3942 } 3943 3944 int rdma_listen(struct rdma_cm_id *id, int backlog) 3945 { 3946 struct rdma_id_private *id_priv = 3947 container_of(id, struct rdma_id_private, id); 3948 int ret; 3949 3950 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { 3951 struct sockaddr_in any_in = { 3952 .sin_family = AF_INET, 3953 .sin_addr.s_addr = htonl(INADDR_ANY), 3954 }; 3955 3956 /* For a well behaved ULP state will be RDMA_CM_IDLE */ 3957 ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); 3958 if (ret) 3959 return ret; 3960 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, 3961 RDMA_CM_LISTEN))) 3962 return -EINVAL; 3963 } 3964 3965 /* 3966 * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable 3967 * any more, and has to be unique in the bind list. 3968 */ 3969 if (id_priv->reuseaddr) { 3970 mutex_lock(&lock); 3971 ret = cma_check_port(id_priv->bind_list, id_priv, 0); 3972 if (!ret) 3973 id_priv->reuseaddr = 0; 3974 mutex_unlock(&lock); 3975 if (ret) 3976 goto err; 3977 } 3978 3979 id_priv->backlog = backlog; 3980 if (id_priv->cma_dev) { 3981 if (rdma_cap_ib_cm(id->device, 1)) { 3982 ret = cma_ib_listen(id_priv); 3983 if (ret) 3984 goto err; 3985 } else if (rdma_cap_iw_cm(id->device, 1)) { 3986 ret = cma_iw_listen(id_priv, backlog); 3987 if (ret) 3988 goto err; 3989 } else { 3990 ret = -ENOSYS; 3991 goto err; 3992 } 3993 } else { 3994 ret = cma_listen_on_all(id_priv); 3995 if (ret) 3996 goto err; 3997 } 3998 3999 return 0; 4000 err: 4001 id_priv->backlog = 0; 4002 /* 4003 * All the failure paths that lead here will not allow the req_handler's 4004 * to have run. 
4005 */ 4006 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 4007 return ret; 4008 } 4009 EXPORT_SYMBOL(rdma_listen); 4010 4011 static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, 4012 struct sockaddr *addr, const struct sockaddr *daddr) 4013 { 4014 struct sockaddr *id_daddr; 4015 int ret; 4016 4017 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && 4018 addr->sa_family != AF_IB) 4019 return -EAFNOSUPPORT; 4020 4021 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 4022 return -EINVAL; 4023 4024 ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); 4025 if (ret) 4026 goto err1; 4027 4028 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); 4029 if (!cma_any_addr(addr)) { 4030 ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); 4031 if (ret) 4032 goto err1; 4033 4034 ret = cma_acquire_dev_by_src_ip(id_priv); 4035 if (ret) 4036 goto err1; 4037 } 4038 4039 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { 4040 if (addr->sa_family == AF_INET) 4041 id_priv->afonly = 1; 4042 #if IS_ENABLED(CONFIG_IPV6) 4043 else if (addr->sa_family == AF_INET6) { 4044 struct net *net = id_priv->id.route.addr.dev_addr.net; 4045 4046 id_priv->afonly = net->ipv6.sysctl.bindv6only; 4047 } 4048 #endif 4049 } 4050 id_daddr = cma_dst_addr(id_priv); 4051 if (daddr != id_daddr) 4052 memcpy(id_daddr, daddr, rdma_addr_size(addr)); 4053 id_daddr->sa_family = addr->sa_family; 4054 4055 ret = cma_get_port(id_priv); 4056 if (ret) 4057 goto err2; 4058 4059 if (!cma_any_addr(addr)) 4060 rdma_restrack_add(&id_priv->res); 4061 return 0; 4062 err2: 4063 if (id_priv->cma_dev) 4064 cma_release_dev(id_priv); 4065 err1: 4066 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 4067 return ret; 4068 } 4069 4070 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 4071 const struct sockaddr *dst_addr) 4072 { 4073 struct rdma_id_private *id_priv = 4074 container_of(id, struct rdma_id_private, id); 4075 struct sockaddr_storage zero_sock = {}; 4076 4077 if (src_addr && src_addr->sa_family) 4078 return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); 4079 4080 /* 4081 * When the src_addr is not specified, automatically supply an any addr 4082 */ 4083 zero_sock.ss_family = dst_addr->sa_family; 4084 if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { 4085 struct sockaddr_in6 *src_addr6 = 4086 (struct sockaddr_in6 *)&zero_sock; 4087 struct sockaddr_in6 *dst_addr6 = 4088 (struct sockaddr_in6 *)dst_addr; 4089 4090 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 4091 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 4092 id->route.addr.dev_addr.bound_dev_if = 4093 dst_addr6->sin6_scope_id; 4094 } else if (dst_addr->sa_family == AF_IB) { 4095 ((struct sockaddr_ib *)&zero_sock)->sib_pkey = 4096 ((struct sockaddr_ib *)dst_addr)->sib_pkey; 4097 } 4098 return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); 4099 } 4100 4101 /* 4102 * If required, resolve the source address for bind and leave the id_priv in 4103 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior 4104 * calls made by ULP, a previously bound ID will not be re-bound and src_addr is 4105 * ignored. 
4106 */ 4107 static int resolve_prepare_src(struct rdma_id_private *id_priv, 4108 struct sockaddr *src_addr, 4109 const struct sockaddr *dst_addr) 4110 { 4111 int ret; 4112 4113 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { 4114 /* For a well behaved ULP state will be RDMA_CM_IDLE */ 4115 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); 4116 if (ret) 4117 return ret; 4118 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, 4119 RDMA_CM_ADDR_QUERY))) 4120 return -EINVAL; 4121 4122 } else { 4123 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); 4124 } 4125 4126 if (cma_family(id_priv) != dst_addr->sa_family) { 4127 ret = -EINVAL; 4128 goto err_state; 4129 } 4130 return 0; 4131 4132 err_state: 4133 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 4134 return ret; 4135 } 4136 4137 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, 4138 const struct sockaddr *dst_addr, unsigned long timeout_ms) 4139 { 4140 struct rdma_id_private *id_priv = 4141 container_of(id, struct rdma_id_private, id); 4142 int ret; 4143 4144 ret = resolve_prepare_src(id_priv, src_addr, dst_addr); 4145 if (ret) 4146 return ret; 4147 4148 if (cma_any_addr(dst_addr)) { 4149 ret = cma_resolve_loopback(id_priv); 4150 } else { 4151 if (dst_addr->sa_family == AF_IB) { 4152 ret = cma_resolve_ib_addr(id_priv); 4153 } else { 4154 /* 4155 * The FSM can return back to RDMA_CM_ADDR_BOUND after 4156 * rdma_resolve_ip() is called, e.g. through the error 4157 * path in addr_handler(). If this happens the existing 4158 * request must be canceled before issuing a new one. 4159 * Since canceling a request is a bit slow and this 4160 * oddball path is rare, keep track of whether a request 4161 * has ever been issued. The flag stays set for the life 4162 * of the id, since the only cancel needed is the one 4163 * done here, immediately before rdma_resolve_ip().
4164 */ 4165 if (id_priv->used_resolve_ip) 4166 rdma_addr_cancel(&id->route.addr.dev_addr); 4167 else 4168 id_priv->used_resolve_ip = 1; 4169 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, 4170 &id->route.addr.dev_addr, 4171 timeout_ms, addr_handler, 4172 false, id_priv); 4173 } 4174 } 4175 if (ret) 4176 goto err; 4177 4178 return 0; 4179 err: 4180 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 4181 return ret; 4182 } 4183 EXPORT_SYMBOL(rdma_resolve_addr); 4184 4185 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) 4186 { 4187 struct rdma_id_private *id_priv = 4188 container_of(id, struct rdma_id_private, id); 4189 4190 return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); 4191 } 4192 EXPORT_SYMBOL(rdma_bind_addr); 4193 4194 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) 4195 { 4196 struct cma_hdr *cma_hdr; 4197 4198 cma_hdr = hdr; 4199 cma_hdr->cma_version = CMA_VERSION; 4200 if (cma_family(id_priv) == AF_INET) { 4201 struct sockaddr_in *src4, *dst4; 4202 4203 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); 4204 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); 4205 4206 cma_set_ip_ver(cma_hdr, 4); 4207 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; 4208 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; 4209 cma_hdr->port = src4->sin_port; 4210 } else if (cma_family(id_priv) == AF_INET6) { 4211 struct sockaddr_in6 *src6, *dst6; 4212 4213 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); 4214 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); 4215 4216 cma_set_ip_ver(cma_hdr, 6); 4217 cma_hdr->src_addr.ip6 = src6->sin6_addr; 4218 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; 4219 cma_hdr->port = src6->sin6_port; 4220 } 4221 return 0; 4222 } 4223 4224 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 4225 const struct ib_cm_event *ib_event) 4226 { 4227 struct rdma_id_private *id_priv = cm_id->context; 4228 struct rdma_cm_event event = {}; 4229 const struct ib_cm_sidr_rep_event_param *rep = 4230 &ib_event->param.sidr_rep_rcvd; 4231 int ret; 4232 4233 mutex_lock(&id_priv->handler_mutex); 4234 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 4235 goto out; 4236 4237 switch (ib_event->event) { 4238 case IB_CM_SIDR_REQ_ERROR: 4239 event.event = RDMA_CM_EVENT_UNREACHABLE; 4240 event.status = -ETIMEDOUT; 4241 break; 4242 case IB_CM_SIDR_REP_RECEIVED: 4243 event.param.ud.private_data = ib_event->private_data; 4244 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 4245 if (rep->status != IB_SIDR_SUCCESS) { 4246 event.event = RDMA_CM_EVENT_UNREACHABLE; 4247 event.status = ib_event->param.sidr_rep_rcvd.status; 4248 pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", 4249 event.status); 4250 break; 4251 } 4252 ret = cma_set_qkey(id_priv, rep->qkey); 4253 if (ret) { 4254 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. 
status %d\n", ret); 4255 event.event = RDMA_CM_EVENT_ADDR_ERROR; 4256 event.status = ret; 4257 break; 4258 } 4259 ib_init_ah_attr_from_path(id_priv->id.device, 4260 id_priv->id.port_num, 4261 id_priv->id.route.path_rec, 4262 &event.param.ud.ah_attr, 4263 rep->sgid_attr); 4264 event.param.ud.qp_num = rep->qpn; 4265 event.param.ud.qkey = rep->qkey; 4266 event.event = RDMA_CM_EVENT_ESTABLISHED; 4267 event.status = 0; 4268 break; 4269 default: 4270 pr_err("RDMA CMA: unexpected IB CM event: %d\n", 4271 ib_event->event); 4272 goto out; 4273 } 4274 4275 ret = cma_cm_event_handler(id_priv, &event); 4276 4277 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4278 if (ret) { 4279 /* Destroy the CM ID by returning a non-zero value. */ 4280 id_priv->cm_id.ib = NULL; 4281 destroy_id_handler_unlock(id_priv); 4282 return ret; 4283 } 4284 out: 4285 mutex_unlock(&id_priv->handler_mutex); 4286 return 0; 4287 } 4288 4289 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 4290 struct rdma_conn_param *conn_param) 4291 { 4292 struct ib_cm_sidr_req_param req; 4293 struct ib_cm_id *id; 4294 void *private_data; 4295 u8 offset; 4296 int ret; 4297 4298 memset(&req, 0, sizeof req); 4299 offset = cma_user_data_offset(id_priv); 4300 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4301 return -EINVAL; 4302 4303 if (req.private_data_len) { 4304 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4305 if (!private_data) 4306 return -ENOMEM; 4307 } else { 4308 private_data = NULL; 4309 } 4310 4311 if (conn_param->private_data && conn_param->private_data_len) 4312 memcpy(private_data + offset, conn_param->private_data, 4313 conn_param->private_data_len); 4314 4315 if (private_data) { 4316 ret = cma_format_hdr(private_data, id_priv); 4317 if (ret) 4318 goto out; 4319 req.private_data = private_data; 4320 } 4321 4322 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, 4323 id_priv); 4324 if (IS_ERR(id)) { 4325 ret = PTR_ERR(id); 4326 goto out; 4327 } 4328 id_priv->cm_id.ib = id; 4329 4330 req.path = id_priv->id.route.path_rec; 4331 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4332 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4333 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 4334 req.max_cm_retries = CMA_MAX_CM_RETRIES; 4335 4336 trace_cm_send_sidr_req(id_priv); 4337 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 4338 if (ret) { 4339 ib_destroy_cm_id(id_priv->cm_id.ib); 4340 id_priv->cm_id.ib = NULL; 4341 } 4342 out: 4343 kfree(private_data); 4344 return ret; 4345 } 4346 4347 static int cma_connect_ib(struct rdma_id_private *id_priv, 4348 struct rdma_conn_param *conn_param) 4349 { 4350 struct ib_cm_req_param req; 4351 struct rdma_route *route; 4352 void *private_data; 4353 struct ib_cm_id *id; 4354 u8 offset; 4355 int ret; 4356 4357 memset(&req, 0, sizeof req); 4358 offset = cma_user_data_offset(id_priv); 4359 if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) 4360 return -EINVAL; 4361 4362 if (req.private_data_len) { 4363 private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 4364 if (!private_data) 4365 return -ENOMEM; 4366 } else { 4367 private_data = NULL; 4368 } 4369 4370 if (conn_param->private_data && conn_param->private_data_len) 4371 memcpy(private_data + offset, conn_param->private_data, 4372 conn_param->private_data_len); 4373 4374 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); 4375 if (IS_ERR(id)) { 4376 ret = PTR_ERR(id); 4377 goto out; 
4378 } 4379 id_priv->cm_id.ib = id; 4380 4381 route = &id_priv->id.route; 4382 if (private_data) { 4383 ret = cma_format_hdr(private_data, id_priv); 4384 if (ret) 4385 goto out; 4386 req.private_data = private_data; 4387 } 4388 4389 req.primary_path = &route->path_rec[0]; 4390 req.primary_path_inbound = route->path_rec_inbound; 4391 req.primary_path_outbound = route->path_rec_outbound; 4392 if (route->num_pri_alt_paths == 2) 4393 req.alternate_path = &route->path_rec[1]; 4394 4395 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; 4396 /* Alternate path SGID attribute currently unsupported */ 4397 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); 4398 req.qp_num = id_priv->qp_num; 4399 req.qp_type = id_priv->id.qp_type; 4400 req.starting_psn = id_priv->seq_num; 4401 req.responder_resources = conn_param->responder_resources; 4402 req.initiator_depth = conn_param->initiator_depth; 4403 req.flow_control = conn_param->flow_control; 4404 req.retry_count = min_t(u8, 7, conn_param->retry_count); 4405 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4406 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4407 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; 4408 req.max_cm_retries = CMA_MAX_CM_RETRIES; 4409 req.srq = id_priv->srq ? 1 : 0; 4410 req.ece.vendor_id = id_priv->ece.vendor_id; 4411 req.ece.attr_mod = id_priv->ece.attr_mod; 4412 4413 trace_cm_send_req(id_priv); 4414 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 4415 out: 4416 if (ret && !IS_ERR(id)) { 4417 ib_destroy_cm_id(id); 4418 id_priv->cm_id.ib = NULL; 4419 } 4420 4421 kfree(private_data); 4422 return ret; 4423 } 4424 4425 static int cma_connect_iw(struct rdma_id_private *id_priv, 4426 struct rdma_conn_param *conn_param) 4427 { 4428 struct iw_cm_id *cm_id; 4429 int ret; 4430 struct iw_cm_conn_param iw_param; 4431 4432 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); 4433 if (IS_ERR(cm_id)) 4434 return PTR_ERR(cm_id); 4435 4436 mutex_lock(&id_priv->qp_mutex); 4437 cm_id->tos = id_priv->tos; 4438 cm_id->tos_set = id_priv->tos_set; 4439 mutex_unlock(&id_priv->qp_mutex); 4440 4441 id_priv->cm_id.iw = cm_id; 4442 4443 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), 4444 rdma_addr_size(cma_src_addr(id_priv))); 4445 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), 4446 rdma_addr_size(cma_dst_addr(id_priv))); 4447 4448 ret = cma_modify_qp_rtr(id_priv, conn_param); 4449 if (ret) 4450 goto out; 4451 4452 if (conn_param) { 4453 iw_param.ord = conn_param->initiator_depth; 4454 iw_param.ird = conn_param->responder_resources; 4455 iw_param.private_data = conn_param->private_data; 4456 iw_param.private_data_len = conn_param->private_data_len; 4457 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; 4458 } else { 4459 memset(&iw_param, 0, sizeof iw_param); 4460 iw_param.qpn = id_priv->qp_num; 4461 } 4462 ret = iw_cm_connect(cm_id, &iw_param); 4463 out: 4464 if (ret) { 4465 iw_destroy_cm_id(cm_id); 4466 id_priv->cm_id.iw = NULL; 4467 } 4468 return ret; 4469 } 4470 4471 /** 4472 * rdma_connect_locked - Initiate an active connection request. 4473 * @id: Connection identifier to connect. 4474 * @conn_param: Connection information used for connected QPs. 4475 * 4476 * Same as rdma_connect() but can only be called from the 4477 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. 
4478 */ 4479 int rdma_connect_locked(struct rdma_cm_id *id, 4480 struct rdma_conn_param *conn_param) 4481 { 4482 struct rdma_id_private *id_priv = 4483 container_of(id, struct rdma_id_private, id); 4484 int ret; 4485 4486 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 4487 return -EINVAL; 4488 4489 if (!id->qp) { 4490 id_priv->qp_num = conn_param->qp_num; 4491 id_priv->srq = conn_param->srq; 4492 } 4493 4494 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4495 if (id->qp_type == IB_QPT_UD) 4496 ret = cma_resolve_ib_udp(id_priv, conn_param); 4497 else 4498 ret = cma_connect_ib(id_priv, conn_param); 4499 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4500 ret = cma_connect_iw(id_priv, conn_param); 4501 } else { 4502 ret = -ENOSYS; 4503 } 4504 if (ret) 4505 goto err_state; 4506 return 0; 4507 err_state: 4508 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 4509 return ret; 4510 } 4511 EXPORT_SYMBOL(rdma_connect_locked); 4512 4513 /** 4514 * rdma_connect - Initiate an active connection request. 4515 * @id: Connection identifier to connect. 4516 * @conn_param: Connection information used for connected QPs. 4517 * 4518 * Users must have resolved a route for the rdma_cm_id to connect with by having 4519 * called rdma_resolve_route before calling this routine. 4520 * 4521 * This call will either connect to a remote QP or obtain remote QP information 4522 * for unconnected rdma_cm_id's. The actual operation is based on the 4523 * rdma_cm_id's port space. 4524 */ 4525 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4526 { 4527 struct rdma_id_private *id_priv = 4528 container_of(id, struct rdma_id_private, id); 4529 int ret; 4530 4531 mutex_lock(&id_priv->handler_mutex); 4532 ret = rdma_connect_locked(id, conn_param); 4533 mutex_unlock(&id_priv->handler_mutex); 4534 return ret; 4535 } 4536 EXPORT_SYMBOL(rdma_connect); 4537 4538 /** 4539 * rdma_connect_ece - Initiate an active connection request with ECE data. 4540 * @id: Connection identifier to connect. 4541 * @conn_param: Connection information used for connected QPs. 4542 * @ece: ECE parameters 4543 * 4544 * See rdma_connect() explanation. 4545 */ 4546 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4547 struct rdma_ucm_ece *ece) 4548 { 4549 struct rdma_id_private *id_priv = 4550 container_of(id, struct rdma_id_private, id); 4551 4552 id_priv->ece.vendor_id = ece->vendor_id; 4553 id_priv->ece.attr_mod = ece->attr_mod; 4554 4555 return rdma_connect(id, conn_param); 4556 } 4557 EXPORT_SYMBOL(rdma_connect_ece); 4558 4559 static int cma_accept_ib(struct rdma_id_private *id_priv, 4560 struct rdma_conn_param *conn_param) 4561 { 4562 struct ib_cm_rep_param rep; 4563 int ret; 4564 4565 ret = cma_modify_qp_rtr(id_priv, conn_param); 4566 if (ret) 4567 goto out; 4568 4569 ret = cma_modify_qp_rts(id_priv, conn_param); 4570 if (ret) 4571 goto out; 4572 4573 memset(&rep, 0, sizeof rep); 4574 rep.qp_num = id_priv->qp_num; 4575 rep.starting_psn = id_priv->seq_num; 4576 rep.private_data = conn_param->private_data; 4577 rep.private_data_len = conn_param->private_data_len; 4578 rep.responder_resources = conn_param->responder_resources; 4579 rep.initiator_depth = conn_param->initiator_depth; 4580 rep.failover_accepted = 0; 4581 rep.flow_control = conn_param->flow_control; 4582 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4583 rep.srq = id_priv->srq ? 
1 : 0; 4584 rep.ece.vendor_id = id_priv->ece.vendor_id; 4585 rep.ece.attr_mod = id_priv->ece.attr_mod; 4586 4587 trace_cm_send_rep(id_priv); 4588 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 4589 out: 4590 return ret; 4591 } 4592 4593 static int cma_accept_iw(struct rdma_id_private *id_priv, 4594 struct rdma_conn_param *conn_param) 4595 { 4596 struct iw_cm_conn_param iw_param; 4597 int ret; 4598 4599 if (!conn_param) 4600 return -EINVAL; 4601 4602 ret = cma_modify_qp_rtr(id_priv, conn_param); 4603 if (ret) 4604 return ret; 4605 4606 iw_param.ord = conn_param->initiator_depth; 4607 iw_param.ird = conn_param->responder_resources; 4608 iw_param.private_data = conn_param->private_data; 4609 iw_param.private_data_len = conn_param->private_data_len; 4610 if (id_priv->id.qp) 4611 iw_param.qpn = id_priv->qp_num; 4612 else 4613 iw_param.qpn = conn_param->qp_num; 4614 4615 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 4616 } 4617 4618 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 4619 enum ib_cm_sidr_status status, u32 qkey, 4620 const void *private_data, int private_data_len) 4621 { 4622 struct ib_cm_sidr_rep_param rep; 4623 int ret; 4624 4625 memset(&rep, 0, sizeof rep); 4626 rep.status = status; 4627 if (status == IB_SIDR_SUCCESS) { 4628 if (qkey) 4629 ret = cma_set_qkey(id_priv, qkey); 4630 else 4631 ret = cma_set_default_qkey(id_priv); 4632 if (ret) 4633 return ret; 4634 rep.qp_num = id_priv->qp_num; 4635 rep.qkey = id_priv->qkey; 4636 4637 rep.ece.vendor_id = id_priv->ece.vendor_id; 4638 rep.ece.attr_mod = id_priv->ece.attr_mod; 4639 } 4640 4641 rep.private_data = private_data; 4642 rep.private_data_len = private_data_len; 4643 4644 trace_cm_send_sidr_rep(id_priv); 4645 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 4646 } 4647 4648 /** 4649 * rdma_accept - Called to accept a connection request or response. 4650 * @id: Connection identifier associated with the request. 4651 * @conn_param: Information needed to establish the connection. This must be 4652 * provided if accepting a connection request. If accepting a connection 4653 * response, this parameter must be NULL. 4654 * 4655 * Typically, this routine is only called by the listener to accept a connection 4656 * request. It must also be called on the active side of a connection if the 4657 * user is performing their own QP transitions. 4658 * 4659 * In the case of error, a reject message is sent to the remote side and the 4660 * state of the qp associated with the id is modified to error, such that any 4661 * previously posted receive buffers would be flushed. 4662 * 4663 * This function is for use by kernel ULPs and must be called from under the 4664 * handler callback. 
4665 */ 4666 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4667 { 4668 struct rdma_id_private *id_priv = 4669 container_of(id, struct rdma_id_private, id); 4670 int ret; 4671 4672 lockdep_assert_held(&id_priv->handler_mutex); 4673 4674 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 4675 return -EINVAL; 4676 4677 if (!id->qp && conn_param) { 4678 id_priv->qp_num = conn_param->qp_num; 4679 id_priv->srq = conn_param->srq; 4680 } 4681 4682 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4683 if (id->qp_type == IB_QPT_UD) { 4684 if (conn_param) 4685 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 4686 conn_param->qkey, 4687 conn_param->private_data, 4688 conn_param->private_data_len); 4689 else 4690 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 4691 0, NULL, 0); 4692 } else { 4693 if (conn_param) 4694 ret = cma_accept_ib(id_priv, conn_param); 4695 else 4696 ret = cma_rep_recv(id_priv); 4697 } 4698 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4699 ret = cma_accept_iw(id_priv, conn_param); 4700 } else { 4701 ret = -ENOSYS; 4702 } 4703 if (ret) 4704 goto reject; 4705 4706 return 0; 4707 reject: 4708 cma_modify_qp_err(id_priv); 4709 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 4710 return ret; 4711 } 4712 EXPORT_SYMBOL(rdma_accept); 4713 4714 int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4715 struct rdma_ucm_ece *ece) 4716 { 4717 struct rdma_id_private *id_priv = 4718 container_of(id, struct rdma_id_private, id); 4719 4720 id_priv->ece.vendor_id = ece->vendor_id; 4721 id_priv->ece.attr_mod = ece->attr_mod; 4722 4723 return rdma_accept(id, conn_param); 4724 } 4725 EXPORT_SYMBOL(rdma_accept_ece); 4726 4727 void rdma_lock_handler(struct rdma_cm_id *id) 4728 { 4729 struct rdma_id_private *id_priv = 4730 container_of(id, struct rdma_id_private, id); 4731 4732 mutex_lock(&id_priv->handler_mutex); 4733 } 4734 EXPORT_SYMBOL(rdma_lock_handler); 4735 4736 void rdma_unlock_handler(struct rdma_cm_id *id) 4737 { 4738 struct rdma_id_private *id_priv = 4739 container_of(id, struct rdma_id_private, id); 4740 4741 mutex_unlock(&id_priv->handler_mutex); 4742 } 4743 EXPORT_SYMBOL(rdma_unlock_handler); 4744 4745 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 4746 { 4747 struct rdma_id_private *id_priv; 4748 int ret; 4749 4750 id_priv = container_of(id, struct rdma_id_private, id); 4751 if (!id_priv->cm_id.ib) 4752 return -EINVAL; 4753 4754 switch (id->device->node_type) { 4755 case RDMA_NODE_IB_CA: 4756 ret = ib_cm_notify(id_priv->cm_id.ib, event); 4757 break; 4758 default: 4759 ret = 0; 4760 break; 4761 } 4762 return ret; 4763 } 4764 EXPORT_SYMBOL(rdma_notify); 4765 4766 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 4767 u8 private_data_len, u8 reason) 4768 { 4769 struct rdma_id_private *id_priv; 4770 int ret; 4771 4772 id_priv = container_of(id, struct rdma_id_private, id); 4773 if (!id_priv->cm_id.ib) 4774 return -EINVAL; 4775 4776 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4777 if (id->qp_type == IB_QPT_UD) { 4778 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 4779 private_data, private_data_len); 4780 } else { 4781 trace_cm_send_rej(id_priv); 4782 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, 4783 private_data, private_data_len); 4784 } 4785 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4786 ret = iw_cm_reject(id_priv->cm_id.iw, 4787 private_data, private_data_len); 4788 } else { 4789 ret = -ENOSYS; 4790 } 4791 4792 return ret; 4793 } 4794 
EXPORT_SYMBOL(rdma_reject); 4795 4796 int rdma_disconnect(struct rdma_cm_id *id) 4797 { 4798 struct rdma_id_private *id_priv; 4799 int ret; 4800 4801 id_priv = container_of(id, struct rdma_id_private, id); 4802 if (!id_priv->cm_id.ib) 4803 return -EINVAL; 4804 4805 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4806 ret = cma_modify_qp_err(id_priv); 4807 if (ret) 4808 goto out; 4809 /* Initiate or respond to a disconnect. */ 4810 trace_cm_disconnect(id_priv); 4811 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { 4812 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) 4813 trace_cm_sent_drep(id_priv); 4814 } else { 4815 trace_cm_sent_dreq(id_priv); 4816 } 4817 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4818 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 4819 } else 4820 ret = -EINVAL; 4821 4822 out: 4823 return ret; 4824 } 4825 EXPORT_SYMBOL(rdma_disconnect); 4826 4827 static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, 4828 struct ib_sa_multicast *multicast, 4829 struct rdma_cm_event *event, 4830 struct cma_multicast *mc) 4831 { 4832 struct rdma_dev_addr *dev_addr; 4833 enum ib_gid_type gid_type; 4834 struct net_device *ndev; 4835 4836 if (status) 4837 pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", 4838 status); 4839 4840 event->status = status; 4841 event->param.ud.private_data = mc->context; 4842 if (status) { 4843 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; 4844 return; 4845 } 4846 4847 dev_addr = &id_priv->id.route.addr.dev_addr; 4848 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 4849 gid_type = 4850 id_priv->cma_dev 4851 ->default_gid_type[id_priv->id.port_num - 4852 rdma_start_port( 4853 id_priv->cma_dev->device)]; 4854 4855 event->event = RDMA_CM_EVENT_MULTICAST_JOIN; 4856 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, 4857 &multicast->rec, ndev, gid_type, 4858 &event->param.ud.ah_attr)) { 4859 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; 4860 goto out; 4861 } 4862 4863 event->param.ud.qp_num = 0xFFFFFF; 4864 event->param.ud.qkey = id_priv->qkey; 4865 4866 out: 4867 dev_put(ndev); 4868 } 4869 4870 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) 4871 { 4872 struct cma_multicast *mc = multicast->context; 4873 struct rdma_id_private *id_priv = mc->id_priv; 4874 struct rdma_cm_event event = {}; 4875 int ret = 0; 4876 4877 mutex_lock(&id_priv->handler_mutex); 4878 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || 4879 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) 4880 goto out; 4881 4882 ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); 4883 if (!ret) { 4884 cma_make_mc_event(status, id_priv, multicast, &event, mc); 4885 ret = cma_cm_event_handler(id_priv, &event); 4886 } 4887 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4888 WARN_ON(ret); 4889 4890 out: 4891 mutex_unlock(&id_priv->handler_mutex); 4892 return 0; 4893 } 4894 4895 static void cma_set_mgid(struct rdma_id_private *id_priv, 4896 struct sockaddr *addr, union ib_gid *mgid) 4897 { 4898 unsigned char mc_map[MAX_ADDR_LEN]; 4899 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4900 struct sockaddr_in *sin = (struct sockaddr_in *) addr; 4901 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; 4902 4903 if (cma_any_addr(addr)) { 4904 memset(mgid, 0, sizeof *mgid); 4905 } else if ((addr->sa_family == AF_INET6) && 4906 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 4907 0xFF10A01B)) { 4908 /* IPv6 address is an SA assigned 
MGID. */ 4909 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 4910 } else if (addr->sa_family == AF_IB) { 4911 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); 4912 } else if (addr->sa_family == AF_INET6) { 4913 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); 4914 if (id_priv->id.ps == RDMA_PS_UDP) 4915 mc_map[7] = 0x01; /* Use RDMA CM signature */ 4916 *mgid = *(union ib_gid *) (mc_map + 4); 4917 } else { 4918 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); 4919 if (id_priv->id.ps == RDMA_PS_UDP) 4920 mc_map[7] = 0x01; /* Use RDMA CM signature */ 4921 *mgid = *(union ib_gid *) (mc_map + 4); 4922 } 4923 } 4924 4925 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 4926 struct cma_multicast *mc) 4927 { 4928 struct ib_sa_mcmember_rec rec; 4929 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 4930 ib_sa_comp_mask comp_mask; 4931 int ret; 4932 4933 ib_addr_get_mgid(dev_addr, &rec.mgid); 4934 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, 4935 &rec.mgid, &rec); 4936 if (ret) 4937 return ret; 4938 4939 if (!id_priv->qkey) { 4940 ret = cma_set_default_qkey(id_priv); 4941 if (ret) 4942 return ret; 4943 } 4944 4945 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); 4946 rec.qkey = cpu_to_be32(id_priv->qkey); 4947 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 4948 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 4949 rec.join_state = mc->join_state; 4950 4951 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 4952 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 4953 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | 4954 IB_SA_MCMEMBER_REC_FLOW_LABEL | 4955 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; 4956 4957 if (id_priv->id.ps == RDMA_PS_IPOIB) 4958 comp_mask |= IB_SA_MCMEMBER_REC_RATE | 4959 IB_SA_MCMEMBER_REC_RATE_SELECTOR | 4960 IB_SA_MCMEMBER_REC_MTU_SELECTOR | 4961 IB_SA_MCMEMBER_REC_MTU | 4962 IB_SA_MCMEMBER_REC_HOP_LIMIT; 4963 4964 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, 4965 id_priv->id.port_num, &rec, comp_mask, 4966 GFP_KERNEL, cma_ib_mc_handler, mc); 4967 return PTR_ERR_OR_ZERO(mc->sa_mc); 4968 } 4969 4970 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, 4971 enum ib_gid_type gid_type) 4972 { 4973 struct sockaddr_in *sin = (struct sockaddr_in *)addr; 4974 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; 4975 4976 if (cma_any_addr(addr)) { 4977 memset(mgid, 0, sizeof *mgid); 4978 } else if (addr->sa_family == AF_INET6) { 4979 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); 4980 } else { 4981 mgid->raw[0] = 4982 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; 4983 mgid->raw[1] = 4984 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
0 : 0x0e; 4985 mgid->raw[2] = 0; 4986 mgid->raw[3] = 0; 4987 mgid->raw[4] = 0; 4988 mgid->raw[5] = 0; 4989 mgid->raw[6] = 0; 4990 mgid->raw[7] = 0; 4991 mgid->raw[8] = 0; 4992 mgid->raw[9] = 0; 4993 mgid->raw[10] = 0xff; 4994 mgid->raw[11] = 0xff; 4995 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; 4996 } 4997 } 4998 4999 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, 5000 struct cma_multicast *mc) 5001 { 5002 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 5003 int err = 0; 5004 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 5005 struct net_device *ndev = NULL; 5006 struct ib_sa_multicast ib = {}; 5007 enum ib_gid_type gid_type; 5008 bool send_only; 5009 5010 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 5011 5012 if (cma_zero_addr(addr)) 5013 return -EINVAL; 5014 5015 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 5016 rdma_start_port(id_priv->cma_dev->device)]; 5017 cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); 5018 5019 ib.rec.pkey = cpu_to_be16(0xffff); 5020 if (dev_addr->bound_dev_if) 5021 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); 5022 if (!ndev) 5023 return -ENODEV; 5024 5025 ib.rec.rate = IB_RATE_PORT_CURRENT; 5026 ib.rec.hop_limit = 1; 5027 ib.rec.mtu = iboe_get_mtu(ndev->mtu); 5028 5029 if (addr->sa_family == AF_INET) { 5030 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { 5031 ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; 5032 if (!send_only) { 5033 err = cma_igmp_send(ndev, &ib.rec.mgid, 5034 true); 5035 } 5036 } 5037 } else { 5038 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 5039 err = -ENOTSUPP; 5040 } 5041 dev_put(ndev); 5042 if (err || !ib.rec.mtu) 5043 return err ?: -EINVAL; 5044 5045 if (!id_priv->qkey) 5046 cma_set_default_qkey(id_priv); 5047 5048 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, 5049 &ib.rec.port_gid); 5050 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); 5051 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); 5052 queue_work(cma_wq, &mc->iboe_join.work); 5053 return 0; 5054 } 5055 5056 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 5057 u8 join_state, void *context) 5058 { 5059 struct rdma_id_private *id_priv = 5060 container_of(id, struct rdma_id_private, id); 5061 struct cma_multicast *mc; 5062 int ret; 5063 5064 /* Not supported for kernel QPs */ 5065 if (WARN_ON(id->qp)) 5066 return -EINVAL; 5067 5068 /* ULP is calling this wrong. 
*/ 5069 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && 5070 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) 5071 return -EINVAL; 5072 5073 if (id_priv->id.qp_type != IB_QPT_UD) 5074 return -EINVAL; 5075 5076 mc = kzalloc(sizeof(*mc), GFP_KERNEL); 5077 if (!mc) 5078 return -ENOMEM; 5079 5080 memcpy(&mc->addr, addr, rdma_addr_size(addr)); 5081 mc->context = context; 5082 mc->id_priv = id_priv; 5083 mc->join_state = join_state; 5084 5085 if (rdma_protocol_roce(id->device, id->port_num)) { 5086 ret = cma_iboe_join_multicast(id_priv, mc); 5087 if (ret) 5088 goto out_err; 5089 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { 5090 ret = cma_join_ib_multicast(id_priv, mc); 5091 if (ret) 5092 goto out_err; 5093 } else { 5094 ret = -ENOSYS; 5095 goto out_err; 5096 } 5097 5098 spin_lock(&id_priv->lock); 5099 list_add(&mc->list, &id_priv->mc_list); 5100 spin_unlock(&id_priv->lock); 5101 5102 return 0; 5103 out_err: 5104 kfree(mc); 5105 return ret; 5106 } 5107 EXPORT_SYMBOL(rdma_join_multicast); 5108 5109 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) 5110 { 5111 struct rdma_id_private *id_priv; 5112 struct cma_multicast *mc; 5113 5114 id_priv = container_of(id, struct rdma_id_private, id); 5115 spin_lock_irq(&id_priv->lock); 5116 list_for_each_entry(mc, &id_priv->mc_list, list) { 5117 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) 5118 continue; 5119 list_del(&mc->list); 5120 spin_unlock_irq(&id_priv->lock); 5121 5122 WARN_ON(id_priv->cma_dev->device != id->device); 5123 destroy_mc(id_priv, mc); 5124 return; 5125 } 5126 spin_unlock_irq(&id_priv->lock); 5127 } 5128 EXPORT_SYMBOL(rdma_leave_multicast); 5129 5130 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) 5131 { 5132 struct rdma_dev_addr *dev_addr; 5133 struct cma_work *work; 5134 5135 dev_addr = &id_priv->id.route.addr.dev_addr; 5136 5137 if ((dev_addr->bound_dev_if == ndev->ifindex) && 5138 (net_eq(dev_net(ndev), dev_addr->net)) && 5139 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { 5140 pr_info("RDMA CM addr change for ndev %s used by id %p\n", 5141 ndev->name, &id_priv->id); 5142 work = kzalloc(sizeof *work, GFP_KERNEL); 5143 if (!work) 5144 return -ENOMEM; 5145 5146 INIT_WORK(&work->work, cma_work_handler); 5147 work->id = id_priv; 5148 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; 5149 cma_id_get(id_priv); 5150 queue_work(cma_wq, &work->work); 5151 } 5152 5153 return 0; 5154 } 5155 5156 static int cma_netdev_callback(struct notifier_block *self, unsigned long event, 5157 void *ptr) 5158 { 5159 struct net_device *ndev = netdev_notifier_info_to_dev(ptr); 5160 struct cma_device *cma_dev; 5161 struct rdma_id_private *id_priv; 5162 int ret = NOTIFY_DONE; 5163 5164 if (event != NETDEV_BONDING_FAILOVER) 5165 return NOTIFY_DONE; 5166 5167 if (!netif_is_bond_master(ndev)) 5168 return NOTIFY_DONE; 5169 5170 mutex_lock(&lock); 5171 list_for_each_entry(cma_dev, &dev_list, list) 5172 list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { 5173 ret = cma_netdev_change(ndev, id_priv); 5174 if (ret) 5175 goto out; 5176 } 5177 5178 out: 5179 mutex_unlock(&lock); 5180 return ret; 5181 } 5182 5183 static void cma_netevent_work_handler(struct work_struct *_work) 5184 { 5185 struct rdma_id_private *id_priv = 5186 container_of(_work, struct rdma_id_private, id.net_work); 5187 struct rdma_cm_event event = {}; 5188 5189 mutex_lock(&id_priv->handler_mutex); 5190 5191 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || 5192 
READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) 5193 goto out_unlock; 5194 5195 event.event = RDMA_CM_EVENT_UNREACHABLE; 5196 event.status = -ETIMEDOUT; 5197 5198 if (cma_cm_event_handler(id_priv, &event)) { 5199 __acquire(&id_priv->handler_mutex); 5200 id_priv->cm_id.ib = NULL; 5201 cma_id_put(id_priv); 5202 destroy_id_handler_unlock(id_priv); 5203 return; 5204 } 5205 5206 out_unlock: 5207 mutex_unlock(&id_priv->handler_mutex); 5208 cma_id_put(id_priv); 5209 } 5210 5211 static int cma_netevent_callback(struct notifier_block *self, 5212 unsigned long event, void *ctx) 5213 { 5214 struct id_table_entry *ips_node = NULL; 5215 struct rdma_id_private *current_id; 5216 struct neighbour *neigh = ctx; 5217 unsigned long flags; 5218 5219 if (event != NETEVENT_NEIGH_UPDATE) 5220 return NOTIFY_DONE; 5221 5222 spin_lock_irqsave(&id_table_lock, flags); 5223 if (neigh->tbl->family == AF_INET6) { 5224 struct sockaddr_in6 neigh_sock_6; 5225 5226 neigh_sock_6.sin6_family = AF_INET6; 5227 neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; 5228 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, 5229 (struct sockaddr *)&neigh_sock_6); 5230 } else if (neigh->tbl->family == AF_INET) { 5231 struct sockaddr_in neigh_sock_4; 5232 5233 neigh_sock_4.sin_family = AF_INET; 5234 neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); 5235 ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, 5236 (struct sockaddr *)&neigh_sock_4); 5237 } else 5238 goto out; 5239 5240 if (!ips_node) 5241 goto out; 5242 5243 list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { 5244 if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, 5245 neigh->ha, ETH_ALEN)) 5246 continue; 5247 cma_id_get(current_id); 5248 queue_work(cma_wq, &current_id->id.net_work); 5249 } 5250 out: 5251 spin_unlock_irqrestore(&id_table_lock, flags); 5252 return NOTIFY_DONE; 5253 } 5254 5255 static struct notifier_block cma_nb = { 5256 .notifier_call = cma_netdev_callback 5257 }; 5258 5259 static struct notifier_block cma_netevent_cb = { 5260 .notifier_call = cma_netevent_callback 5261 }; 5262 5263 static void cma_send_device_removal_put(struct rdma_id_private *id_priv) 5264 { 5265 struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; 5266 enum rdma_cm_state state; 5267 unsigned long flags; 5268 5269 mutex_lock(&id_priv->handler_mutex); 5270 /* Record that we want to remove the device */ 5271 spin_lock_irqsave(&id_priv->lock, flags); 5272 state = id_priv->state; 5273 if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { 5274 spin_unlock_irqrestore(&id_priv->lock, flags); 5275 mutex_unlock(&id_priv->handler_mutex); 5276 cma_id_put(id_priv); 5277 return; 5278 } 5279 id_priv->state = RDMA_CM_DEVICE_REMOVAL; 5280 spin_unlock_irqrestore(&id_priv->lock, flags); 5281 5282 if (cma_cm_event_handler(id_priv, &event)) { 5283 /* 5284 * At this point the ULP promises it won't call 5285 * rdma_destroy_id() concurrently 5286 */ 5287 cma_id_put(id_priv); 5288 mutex_unlock(&id_priv->handler_mutex); 5289 trace_cm_id_destroy(id_priv); 5290 _destroy_id(id_priv, state); 5291 return; 5292 } 5293 mutex_unlock(&id_priv->handler_mutex); 5294 5295 /* 5296 * If this races with destroy then the thread that first assigns state 5297 * to a destroying does the cancel.
5298 */ 5299 cma_cancel_operation(id_priv, state); 5300 cma_id_put(id_priv); 5301 } 5302 5303 static void cma_process_remove(struct cma_device *cma_dev) 5304 { 5305 mutex_lock(&lock); 5306 while (!list_empty(&cma_dev->id_list)) { 5307 struct rdma_id_private *id_priv = list_first_entry( 5308 &cma_dev->id_list, struct rdma_id_private, device_item); 5309 5310 list_del_init(&id_priv->listen_item); 5311 list_del_init(&id_priv->device_item); 5312 cma_id_get(id_priv); 5313 mutex_unlock(&lock); 5314 5315 cma_send_device_removal_put(id_priv); 5316 5317 mutex_lock(&lock); 5318 } 5319 mutex_unlock(&lock); 5320 5321 cma_dev_put(cma_dev); 5322 wait_for_completion(&cma_dev->comp); 5323 } 5324 5325 static bool cma_supported(struct ib_device *device) 5326 { 5327 u32 i; 5328 5329 rdma_for_each_port(device, i) { 5330 if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) 5331 return true; 5332 } 5333 return false; 5334 } 5335 5336 static int cma_add_one(struct ib_device *device) 5337 { 5338 struct rdma_id_private *to_destroy; 5339 struct cma_device *cma_dev; 5340 struct rdma_id_private *id_priv; 5341 unsigned long supported_gids = 0; 5342 int ret; 5343 u32 i; 5344 5345 if (!cma_supported(device)) 5346 return -EOPNOTSUPP; 5347 5348 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); 5349 if (!cma_dev) 5350 return -ENOMEM; 5351 5352 cma_dev->device = device; 5353 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, 5354 sizeof(*cma_dev->default_gid_type), 5355 GFP_KERNEL); 5356 if (!cma_dev->default_gid_type) { 5357 ret = -ENOMEM; 5358 goto free_cma_dev; 5359 } 5360 5361 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, 5362 sizeof(*cma_dev->default_roce_tos), 5363 GFP_KERNEL); 5364 if (!cma_dev->default_roce_tos) { 5365 ret = -ENOMEM; 5366 goto free_gid_type; 5367 } 5368 5369 rdma_for_each_port (device, i) { 5370 supported_gids = roce_gid_type_mask_support(device, i); 5371 WARN_ON(!supported_gids); 5372 if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) 5373 cma_dev->default_gid_type[i - rdma_start_port(device)] = 5374 CMA_PREFERRED_ROCE_GID_TYPE; 5375 else 5376 cma_dev->default_gid_type[i - rdma_start_port(device)] = 5377 find_first_bit(&supported_gids, BITS_PER_LONG); 5378 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; 5379 } 5380 5381 init_completion(&cma_dev->comp); 5382 refcount_set(&cma_dev->refcount, 1); 5383 INIT_LIST_HEAD(&cma_dev->id_list); 5384 ib_set_client_data(device, &cma_client, cma_dev); 5385 5386 mutex_lock(&lock); 5387 list_add_tail(&cma_dev->list, &dev_list); 5388 list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { 5389 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); 5390 if (ret) 5391 goto free_listen; 5392 } 5393 mutex_unlock(&lock); 5394 5395 trace_cm_add_one(device); 5396 return 0; 5397 5398 free_listen: 5399 list_del(&cma_dev->list); 5400 mutex_unlock(&lock); 5401 5402 /* cma_process_remove() will delete to_destroy */ 5403 cma_process_remove(cma_dev); 5404 kfree(cma_dev->default_roce_tos); 5405 free_gid_type: 5406 kfree(cma_dev->default_gid_type); 5407 5408 free_cma_dev: 5409 kfree(cma_dev); 5410 return ret; 5411 } 5412 5413 static void cma_remove_one(struct ib_device *device, void *client_data) 5414 { 5415 struct cma_device *cma_dev = client_data; 5416 5417 trace_cm_remove_one(device); 5418 5419 mutex_lock(&lock); 5420 list_del(&cma_dev->list); 5421 mutex_unlock(&lock); 5422 5423 cma_process_remove(cma_dev); 5424 kfree(cma_dev->default_roce_tos); 5425 kfree(cma_dev->default_gid_type); 5426 kfree(cma_dev); 5427 } 5428 5429 
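/*
 * Editorial note, not part of the cma implementation: the sketch below is a
 * minimal active-side usage of the API exported earlier in this file
 * (rdma_create_id(), rdma_resolve_addr(), rdma_resolve_route(),
 * rdma_connect_locked()). The cma_example_* names are hypothetical, error
 * handling is reduced to the bare minimum, and a real ULP would also create
 * its QP/CQ before connecting.
 */
static int cma_example_active_handler(struct rdma_cm_id *id,
				      struct rdma_cm_event *event)
{
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* Source address and device are bound; now resolve a route. */
		ret = rdma_resolve_route(id, 2000);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED: {
		struct rdma_conn_param param = {};

		/*
		 * The handler_mutex is already held inside the handler
		 * callback, so rdma_connect_locked() is used here rather
		 * than rdma_connect().
		 */
		ret = rdma_connect_locked(id, &param);
		break;
	}
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Connection is up; the ULP can start posting work requests. */
		break;
	default:
		break;
	}
	/* A non-zero return tells the core to destroy the cm_id. */
	return ret;
}

static int __maybe_unused cma_example_connect(struct sockaddr *dst)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, cma_example_active_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* Kicks off the asynchronous resolve/route/connect state machine. */
	ret = rdma_resolve_addr(id, NULL, dst, 2000);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}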
static int cma_init_net(struct net *net) 5430 { 5431 struct cma_pernet *pernet = cma_pernet(net); 5432 5433 xa_init(&pernet->tcp_ps); 5434 xa_init(&pernet->udp_ps); 5435 xa_init(&pernet->ipoib_ps); 5436 xa_init(&pernet->ib_ps); 5437 5438 return 0; 5439 } 5440 5441 static void cma_exit_net(struct net *net) 5442 { 5443 struct cma_pernet *pernet = cma_pernet(net); 5444 5445 WARN_ON(!xa_empty(&pernet->tcp_ps)); 5446 WARN_ON(!xa_empty(&pernet->udp_ps)); 5447 WARN_ON(!xa_empty(&pernet->ipoib_ps)); 5448 WARN_ON(!xa_empty(&pernet->ib_ps)); 5449 } 5450 5451 static struct pernet_operations cma_pernet_operations = { 5452 .init = cma_init_net, 5453 .exit = cma_exit_net, 5454 .id = &cma_pernet_id, 5455 .size = sizeof(struct cma_pernet), 5456 }; 5457 5458 static int __init cma_init(void) 5459 { 5460 int ret; 5461 5462 /* 5463 * There is a rare lock ordering dependency in cma_netdev_callback() 5464 * that only happens when bonding is enabled. Teach lockdep that rtnl 5465 * must never be nested under lock so it can find these without having 5466 * to test with bonding. 5467 */ 5468 if (IS_ENABLED(CONFIG_LOCKDEP)) { 5469 rtnl_lock(); 5470 mutex_lock(&lock); 5471 mutex_unlock(&lock); 5472 rtnl_unlock(); 5473 } 5474 5475 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); 5476 if (!cma_wq) 5477 return -ENOMEM; 5478 5479 ret = register_pernet_subsys(&cma_pernet_operations); 5480 if (ret) 5481 goto err_wq; 5482 5483 ib_sa_register_client(&sa_client); 5484 register_netdevice_notifier(&cma_nb); 5485 register_netevent_notifier(&cma_netevent_cb); 5486 5487 ret = ib_register_client(&cma_client); 5488 if (ret) 5489 goto err; 5490 5491 ret = cma_configfs_init(); 5492 if (ret) 5493 goto err_ib; 5494 5495 return 0; 5496 5497 err_ib: 5498 ib_unregister_client(&cma_client); 5499 err: 5500 unregister_netevent_notifier(&cma_netevent_cb); 5501 unregister_netdevice_notifier(&cma_nb); 5502 ib_sa_unregister_client(&sa_client); 5503 unregister_pernet_subsys(&cma_pernet_operations); 5504 err_wq: 5505 destroy_workqueue(cma_wq); 5506 return ret; 5507 } 5508 5509 static void __exit cma_cleanup(void) 5510 { 5511 cma_configfs_exit(); 5512 ib_unregister_client(&cma_client); 5513 unregister_netevent_notifier(&cma_netevent_cb); 5514 unregister_netdevice_notifier(&cma_nb); 5515 ib_sa_unregister_client(&sa_client); 5516 unregister_pernet_subsys(&cma_pernet_operations); 5517 destroy_workqueue(cma_wq); 5518 } 5519 5520 module_init(cma_init); 5521 module_exit(cma_cleanup); 5522
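/*
 * Editorial note, not part of the cma implementation: a companion to the
 * active-side sketch above, showing a minimal passive-side (listener) flow
 * for rdma_bind_addr()/rdma_listen()/rdma_accept(). The cma_example_* names
 * are hypothetical; per-connection QP setup is omitted, and rdma_accept() is
 * called from within the event handler, as required for kernel ULPs.
 */
static int cma_example_listen_handler(struct rdma_cm_id *id,
				      struct rdma_cm_event *event)
{
	struct rdma_conn_param param = {};

	if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST)
		return 0;

	/*
	 * 'id' is the new child cm_id created for this request. A real ULP
	 * would allocate its per-connection state and QP here; this sketch
	 * simply echoes the initiator's depth/resources in the accept.
	 */
	param.responder_resources = event->param.conn.responder_resources;
	param.initiator_depth = event->param.conn.initiator_depth;

	/*
	 * The handler_mutex is held here, satisfying rdma_accept()'s
	 * requirement that it be called from under the handler callback.
	 * If rdma_accept() fails it sends a reject, and the non-zero
	 * return lets the core destroy the child id.
	 */
	return rdma_accept(id, &param);
}

static int __maybe_unused cma_example_listen(struct sockaddr *bind_addr)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(&init_net, cma_example_listen_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	ret = rdma_bind_addr(id, bind_addr);
	if (!ret)
		ret = rdma_listen(id, 16);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}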