/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_counter.h>

#include "core_priv.h"
#include "restrack.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
static struct workqueue_struct *ib_unreg_wq;

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list; any change or
 * assignment of a device name must also hold the write side to guarantee
 * unique names.
 */

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1
#define DEVICE_GID_UPDATES XA_MARK_2

static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

static void ib_client_put(struct ib_client *client)
{
	if (refcount_dec_and_test(&client->uses))
		complete(&client->uses_zero);
}

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1

unsigned int rdma_dev_net_id;

/*
 * A list of net namespaces is maintained in an xarray. This is necessary
 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
 */
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
 * rwsem to protect accessing the rdma_nets xarray entries.
 */
static DECLARE_RWSEM(rdma_nets_rwsem);

bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
		 "Share device among net namespaces; default=1 (shared)");
/**
 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
 *			     from a specified net namespace or not.
 * @dev: Pointer to rdma device which needs to be checked
 * @net: Pointer to net namespace for which access is to be checked
 *
 * When the rdma device is in shared mode, it ignores the net namespace.
 * When the rdma device is exclusive to a net namespace, the device's net
 * namespace is checked against the specified one.
 */
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
	return (ib_devices_shared_netns ||
		net_eq(read_pnet(&dev->coredev.rdma_net), net));
}
EXPORT_SYMBOL(rdma_dev_access_netns);

/**
 * rdma_dev_has_raw_cap() - Return whether a specified rdma device has
 *			    CAP_NET_RAW capability or not.
 *
 * @dev: Pointer to rdma device whose capability is to be checked
 *
 * Returns true if the rdma device's owning user namespace has CAP_NET_RAW
 * capability, otherwise false. When the rdma subsystem is in legacy shared
 * network namespace mode, the default net namespace is considered.
 */
bool rdma_dev_has_raw_cap(const struct ib_device *dev)
{
	const struct net *net;

	/* Network namespace is the resource whose user namespace
	 * is to be considered. When in shared mode, there is no reliable
	 * network namespace resource, so consider the default net namespace.
	 */
	if (ib_devices_shared_netns)
		net = &init_net;
	else
		net = read_pnet(&dev->coredev.rdma_net);

	return ns_capable(net->user_ns, CAP_NET_RAW);
}
EXPORT_SYMBOL(rdma_dev_has_raw_cap);

/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays. So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)			\
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);	\
	     !xa_is_err(entry);						\
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
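
/*
 * Illustrative sketch: the same pattern ib_device_rename() uses below to
 * visit every registered client data slot, including slots holding NULL
 * (variable names here are hypothetical):
 *
 *	unsigned long index;
 *	void *client_data;
 *
 *	xan_for_each_marked(&ibdev->client_data, index, client_data,
 *			    CLIENT_DATA_REGISTERED) {
 *		struct ib_client *client = xa_load(&clients, index);
 *		...
 *	}
 */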

/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
			   struct va_format *vaf)
{
	if (ibdev && ibdev->dev.parent)
		dev_printk_emit(level[1] - '0',
				ibdev->dev.parent,
				"%s %s %s: %pV",
				dev_driver_string(ibdev->dev.parent),
				dev_name(ibdev->dev.parent),
				dev_name(&ibdev->dev),
				vaf);
	else if (ibdev)
		printk("%s%s: %pV",
		       level, dev_name(&ibdev->dev), vaf);
	else
		printk("%s(NULL ib_device): %pV", level, vaf);
}

#define define_ibdev_printk_level(func, level)			\
void func(const struct ib_device *ibdev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__ibdev_printk(level, ibdev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
define_ibdev_printk_level(ibdev_err, KERN_ERR);
define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
define_ibdev_printk_level(ibdev_info, KERN_INFO);
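
/*
 * Illustrative sketch: these helpers are used like dev_err() and friends,
 * prefixing the message with the parent device and the ib device name,
 * e.g. (message text hypothetical):
 *
 *	ibdev_warn(ibdev, "port %u: GID table query failed, err %d\n",
 *		   port, ret);
 */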

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net);

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};

static void ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(reg_user_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}
}

/*
 * Caller must perform ib_device_put() to return the device reference count
 * when ib_device_get_by_index() returns a valid device pointer.
 */
struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!rdma_dev_access_netns(device, net)) {
			device = NULL;
			goto out;
		}

		if (!ib_device_try_get(device))
			device = NULL;
	}
out:
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference is to be released
 *
 * ib_device_put() releases the reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
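
/*
 * Illustrative sketch of the get/put pairing used with
 * ib_device_get_by_index() (caller code hypothetical):
 *
 *	struct ib_device *dev = ib_device_get_by_index(net, index);
 *
 *	if (!dev)
 *		return -ENODEV;
 *	...the device cannot complete unregistration while the ref is held...
 *	ib_device_put(dev);
 */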

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

static int rename_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;
	int ret = 0;

	mutex_lock(&device->compat_devs_mutex);
	xa_for_each (&device->compat_devs, index, cdev) {
		ret = device_rename(&cdev->dev, dev_name(&device->dev));
		if (ret) {
			dev_warn(&cdev->dev,
				 "Failed to rename compatdev to new name %s\n",
				 dev_name(&device->dev));
			break;
		}
	}
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	unsigned long index;
	void *client_data;
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		up_write(&devices_rwsem);
		return 0;
	}

	if (__ib_device_get_by_name(name)) {
		up_write(&devices_rwsem);
		return -EEXIST;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret) {
		up_write(&devices_rwsem);
		return ret;
	}

	strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
	ret = rename_compat_devs(ibdev);

	downgrade_write(&devices_rwsem);
	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked(&ibdev->client_data, index, client_data,
			    CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->rename)
			continue;

		client->rename(ibdev, client_data);
	}
	up_read(&ibdev->client_data_rwsem);
	rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
	up_read(&devices_rwsem);
	return 0;
}

int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
{
	if (use_dim > 1)
		return -EINVAL;
	ibdev->use_cq_dim = use_dim;

	return 0;
}

static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_write(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}
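
/*
 * Illustrative sketch: when a driver registers with a printf-style name
 * template (the driver name below is hypothetical), alloc_name() fills in
 * the lowest free index, e.g.
 *
 *	ib_register_device(ibdev, "myib_%d", dma_device);
 *
 * yields "myib_0", "myib_1", ... depending on which names are already taken.
 */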

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	free_netdevs(dev);
	WARN_ON(refcount_read(&dev->refcount));
	if (dev->hw_stats_data)
		ib_device_release_hw_stats(dev->hw_stats_data);
	if (dev->port_data) {
		ib_cache_release_one(dev);
		ib_security_release_port_pkey_list(dev);
		rdma_counter_release(dev);
		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
				       pdata[0]),
			  rcu_head);
	}

	mutex_destroy(&dev->subdev_lock);
	mutex_destroy(&dev->unregistration_lock);
	mutex_destroy(&dev->compat_devs_mutex);

	xa_destroy(&dev->compat_devs);
	xa_destroy(&dev->client_data);
	kfree_rcu(dev, rcu_head);
}

static int ib_device_uevent(const struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static const struct ns_common *net_namespace(const struct device *d)
{
	const struct ib_core_device *coredev =
			container_of(d, struct ib_core_device, dev);
	struct net *net = read_pnet(&coredev->rdma_net);

	return net ? to_ns_common(net) : NULL;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

static void rdma_init_coredev(struct ib_core_device *coredev,
			      struct ib_device *dev, struct net *net)
{
	bool is_full_dev = &dev->coredev == coredev;

	/* This BUILD_BUG_ON is intended to catch layout change
	 * of union of ib_core_device and device.
	 * dev must be the first element as ib_core and provider
	 * drivers rely on it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
		     offsetof(struct ib_device, dev));

	coredev->dev.class = &ib_class;
	coredev->dev.groups = dev->groups;

	/*
	 * Don't expose hw counters outside of the init namespace.
	 */
	if (!is_full_dev && dev->hw_stats_attr_index)
		coredev->dev.groups[dev->hw_stats_attr_index] = NULL;

	device_initialize(&coredev->dev);
	coredev->owner = dev;
	INIT_LIST_HEAD(&coredev->port_list);
	write_pnet(&coredev->rdma_net, net);
}

/**
 * _ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 * @net: network namespace device should be located in, namespace
 *       must stay valid until ib_register_device() is completed.
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size, struct net *net)
{
	struct ib_device *device;
	unsigned int i;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	/* ib_devices_shared_netns can't change while we have active namespaces
	 * in the system which means either init_net is passed or the user has
	 * no idea what they are doing.
	 *
	 * To avoid breaking backward compatibility, when in shared mode,
	 * force to init the device in the init_net.
	 */
	net = ib_devices_shared_netns ? &init_net : net;
	rdma_init_coredev(&device->coredev, device, net);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->qp_open_list_lock);
	init_rwsem(&device->event_handler_rwsem);
	mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be an allocating xarray because we don't want
	 * our mark to be destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
	mutex_init(&device->compat_devs_mutex);
	init_completion(&device->unreg_completion);
	INIT_WORK(&device->unregistration_work, ib_unregister_work);

	spin_lock_init(&device->cq_pools_lock);
	for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
		INIT_LIST_HEAD(&device->cq_pools[i]);

	rwlock_init(&device->cache_lock);

	device->uverbs_cmd_mask =
		BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
		BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
		BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
		BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
		BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
		BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
		BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
		BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
		BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
		BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
		BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
		BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
		BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
		BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
		BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
		BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
		BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
		BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
		BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
		BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
		BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);

	mutex_init(&device->subdev_lock);
	INIT_LIST_HEAD(&device->subdev_list_head);
	INIT_LIST_HEAD(&device->subdev_list);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
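
/*
 * Illustrative sketch (hypothetical driver): providers embed struct ib_device
 * in their private structure and allocate both together through the
 * ib_alloc_device() macro, which wraps _ib_alloc_device():
 *
 *	struct my_ib_dev {
 *		struct ib_device ibdev;
 *		...driver private state...
 *	};
 *
 *	struct my_ib_dev *mydev = ib_alloc_device(my_ib_dev, ibdev);
 *
 * A failed probe must unwind with ib_dealloc_device(&mydev->ibdev).
 */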

/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->ops.dealloc_driver)
		device->ops.dealloc_driver(device);

	/*
	 * ib_unregister_driver() requires all devices to remain in the xarray
	 * while their ops are callable. The last op we call is dealloc_driver
	 * above. This is needed to create a fence on op callbacks prior to
	 * allowing the driver module to unload.
	 */
	down_write(&devices_rwsem);
	if (xa_load(&devices, device->index) == device)
		xa_erase(&devices, device->index);
	up_write(&devices_rwsem);

	/* Expedite releasing netdev references */
	free_netdevs(device);

	WARN_ON(!xa_empty(&device->compat_devs));
	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence, any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * So long as the client is registered hold both the client and device
	 * unregistration locks.
	 */
	if (!refcount_inc_not_zero(&client->uses))
		goto out_unlock;
	refcount_inc(&device->refcount);

	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
			CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add) {
		if (client->add(device)) {
			/*
			 * If a client fails to add then the error code is
			 * ignored, but we won't call any more ops on this
			 * client.
			 */
			xa_erase(&device->client_data, client->client_id);
			up_read(&device->client_data_rwsem);
			ib_device_put(device);
			ib_client_put(client);
			return 0;
		}
	}

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	ib_device_put(device);
	ib_client_put(client);
out_unlock:
	up_write(&device->client_data_rwsem);
	return ret;
}

static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	up_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	ib_device_put(device);
	ib_client_put(client);
}

static int alloc_port_data(struct ib_device *device)
{
	struct ib_port_data_rcu *pdata_rcu;
	u32 port;

	if (device->port_data)
		return 0;

	/* This can only be called once the physical port range is defined */
	if (WARN_ON(!device->phys_port_cnt))
		return -EINVAL;

	/* Reserve U32_MAX so the logic to go over all the ports is sane */
	if (WARN_ON(device->phys_port_cnt == U32_MAX))
		return -EINVAL;

	/*
	 * device->port_data is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_data is declared as a 1 based array with potential
	 * empty slots at the beginning.
	 */
	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
					size_add(rdma_end_port(device), 1)),
			    GFP_KERNEL);
	if (!pdata_rcu)
		return -ENOMEM;
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * kfree_rcu.
	 */
	device->port_data = pdata_rcu->pdata;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		pdata->ib_dev = device;
		spin_lock_init(&pdata->pkey_list_lock);
		INIT_LIST_HEAD(&pdata->pkey_list);
		spin_lock_init(&pdata->netdev_lock);
		INIT_HLIST_NODE(&pdata->ndev_hash_link);
	}
	return 0;
}

static int verify_immutable(const struct ib_device *dev, u32 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int setup_port_data(struct ib_device *device)
{
	u32 port;
	int ret;

	ret = alloc_port_data(device);
	if (ret)
		return ret;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		ret = device->ops.get_port_immutable(device, port,
						     &pdata->immutable);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

/**
 * ib_port_immutable_read() - Read rdma port's immutable data
 * @dev: IB device
 * @port: port number whose immutable data is to be read. Ports are numbered
 *        starting from 1 and are valid up to and including rdma_end_port().
 */
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port)
{
	WARN_ON(!rdma_is_port_valid(dev, port));
	return &dev->port_data[port].immutable;
}
EXPORT_SYMBOL(ib_port_immutable_read);
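
/*
 * Illustrative sketch (hypothetical caller): checking a port's protocol
 * capabilities from the cached immutable data:
 *
 *	const struct ib_port_immutable *immutable =
 *		ib_port_immutable_read(dev, port);
 *
 *	if (immutable->core_cap_flags & RDMA_CORE_CAP_PROT_ROCE)
 *		...
 */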

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			ib_get_cached_subnet_prefix(dev, i, &sp);
			ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_OK;
}

static void compatdev_release(struct device *dev)
{
	struct ib_core_device *cdev =
		container_of(dev, struct ib_core_device, dev);

	kfree(cdev);
}

static int add_one_compat_dev(struct ib_device *device,
			      struct rdma_dev_net *rnet)
{
	struct ib_core_device *cdev;
	int ret;

	lockdep_assert_held(&rdma_nets_rwsem);
	if (!ib_devices_shared_netns)
		return 0;

	/*
	 * Create and add compat device in all namespaces other than where it
	 * is currently bound to.
	 */
	if (net_eq(read_pnet(&rnet->net),
		   read_pnet(&device->coredev.rdma_net)))
		return 0;

	/*
	 * The first of init_net() or ib_register_device() to take the
	 * compat_devs_mutex wins and gets to add the device. Others will wait
	 * for completion here.
	 */
	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_load(&device->compat_devs, rnet->id);
	if (cdev) {
		ret = 0;
		goto done;
	}
	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
	if (ret)
		goto done;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto cdev_err;
	}

	cdev->dev.parent = device->dev.parent;
	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
	cdev->dev.release = compatdev_release;
	ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
	if (ret)
		goto add_err;

	ret = device_add(&cdev->dev);
	if (ret)
		goto add_err;
	ret = ib_setup_port_attrs(cdev);
	if (ret)
		goto port_err;

	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
			      cdev, GFP_KERNEL));
	if (ret)
		goto insert_err;

	mutex_unlock(&device->compat_devs_mutex);
	return 0;

insert_err:
	ib_free_port_attrs(cdev);
port_err:
	device_del(&cdev->dev);
add_err:
	put_device(&cdev->dev);
cdev_err:
	xa_release(&device->compat_devs, rnet->id);
done:
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

static void remove_one_compat_dev(struct ib_device *device, u32 id)
{
	struct ib_core_device *cdev;

	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_erase(&device->compat_devs, id);
	mutex_unlock(&device->compat_devs_mutex);
	if (cdev) {
		ib_free_port_attrs(cdev);
		device_del(&cdev->dev);
		put_device(&cdev->dev);
	}
}

static void remove_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;

	xa_for_each (&device->compat_devs, index, cdev)
		remove_one_compat_dev(device, index);
}

static int add_compat_devs(struct ib_device *device)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	lockdep_assert_held(&devices_rwsem);

	down_read(&rdma_nets_rwsem);
	xa_for_each (&rdma_nets, index, rnet) {
		ret = add_one_compat_dev(device, rnet);
		if (ret)
			break;
	}
	up_read(&rdma_nets_rwsem);
	return ret;
}

static void remove_all_compat_devs(void)
{
	struct ib_compat_device *cdev;
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		unsigned long c_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&dev->compat_devs, c_index, cdev)
			remove_one_compat_dev(dev, c_index);
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
}

static int add_all_compat_devs(void)
{
	struct rdma_dev_net *rnet;
	struct ib_device *dev;
	unsigned long index;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned long net_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&rdma_nets, net_index, rnet) {
			ret = add_one_compat_dev(dev, rnet);
			if (ret)
				break;
		}
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
	if (ret)
		remove_all_compat_devs();
	return ret;
}

int rdma_compatdev_set(u8 enable)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	down_write(&rdma_nets_rwsem);
	if (ib_devices_shared_netns == enable) {
		up_write(&rdma_nets_rwsem);
		return 0;
	}

	/* enable/disable of compat devices is not supported
	 * when more than default init_net exists.
	 */
	xa_for_each (&rdma_nets, index, rnet) {
		ret++;
		break;
	}
	if (!ret)
		ib_devices_shared_netns = enable;
	up_write(&rdma_nets_rwsem);
	if (ret)
		return -EBUSY;

	if (enable)
		ret = add_all_compat_devs();
	else
		remove_all_compat_devs();
	return ret;
}

static void rdma_dev_exit_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	struct ib_device *dev;
	unsigned long index;
	int ret;

	down_write(&rdma_nets_rwsem);
	/*
	 * Prevent the ID from being re-used and hide the id from xa_for_each.
	 */
	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
	WARN_ON(ret);
	up_write(&rdma_nets_rwsem);

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		get_device(&dev->dev);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del() doesn't hold the devices_rwsem for too long.
		 */
		up_read(&devices_rwsem);

		remove_one_compat_dev(dev, rnet->id);

		/*
		 * If the real device is in the NS then move it back to init.
		 */
		rdma_dev_change_netns(dev, net, &init_net);

		put_device(&dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);

	rdma_nl_net_exit(rnet);
	xa_erase(&rdma_nets, rnet->id);
}

static __net_init int rdma_dev_init_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	unsigned long index;
	struct ib_device *dev;
	int ret;

	write_pnet(&rnet->net, net);

	ret = rdma_nl_net_init(rnet);
	if (ret)
		return ret;

	/* No need to create any compat devices in default init_net. */
	if (net_eq(net, &init_net))
		return 0;

	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
	if (ret) {
		rdma_nl_net_exit(rnet);
		return ret;
	}

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		/* Hold nets_rwsem so that netlink command cannot change
		 * system configuration for device sharing mode.
		 */
		down_read(&rdma_nets_rwsem);
		ret = add_one_compat_dev(dev, rnet);
		up_read(&rdma_nets_rwsem);
		if (ret)
			break;
	}
	up_read(&devices_rwsem);

	if (ret)
		rdma_dev_exit_net(net);

	return ret;
}

/*
 * Assign the unique string device name and the unique device index. This is
 * undone by ib_dealloc_device.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret > 0)
		ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	ib_device_check_mandatory(device);

	ret = setup_port_data(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per-port data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	return 0;
}

static void disable_device(struct ib_device *device)
{
	u32 cid;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	/*
	 * Remove clients in LIFO order, see assign_client_id. This could be
	 * more efficient if xarray learns to reverse iterate. Since no new
	 * clients can be added to this ib_device past this point we only need
	 * the maximum possible client_id value here.
	 */
	down_read(&clients_rwsem);
	cid = highest_client_id;
	up_read(&clients_rwsem);
	while (cid) {
		cid--;
		remove_client_context(device, cid);
	}

	ib_cq_pool_cleanup(device);

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	/*
	 * compat devices must be removed after device refcount drops to zero.
	 * Otherwise init_net() may add more compatdevs after removing compat
	 * devices and before device is disabled.
	 */
	remove_compat_devs(device);
}

/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it fails.
 */
static int enable_device_and_get(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	/*
	 * One ref belongs to the xa and the other belongs to this
	 * thread. This is needed to guard against parallel unregistration.
	 */
	refcount_set(&device->refcount, 2);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);

	/*
	 * By using downgrade_write() we ensure that no other thread can clear
	 * DEVICE_REGISTERED while we are completing the client setup.
	 */
	downgrade_write(&devices_rwsem);

	if (device->ops.enable_driver) {
		ret = device->ops.enable_driver(device);
		if (ret)
			goto out;
	}

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret)
			break;
	}
	up_read(&clients_rwsem);
	if (!ret)
		ret = add_compat_devs(device);
out:
	up_read(&devices_rwsem);
	return ret;
}

static void prevent_dealloc_device(struct ib_device *ib_dev)
{
}

static void ib_device_notify_register(struct ib_device *device)
{
	struct net_device *netdev;
	u32 port;
	int ret;

	down_read(&devices_rwsem);

	/* Mark for userspace that device is ready */
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);

	ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
	if (ret)
		goto out;

	rdma_for_each_port(device, port) {
		netdev = ib_device_get_netdev(device, port);
		if (!netdev)
			continue;

		ret = rdma_nl_notify_event(device, port,
					   RDMA_NETDEV_ATTACH_EVENT);
		dev_put(netdev);
		if (ret)
			goto out;
	}

out:
	up_read(&devices_rwsem);
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @name: unique string device name. This may include a '%' which will
 *	  cause a unique index to be added to the passed device name.
 * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
 *	        device will be used. In this case the caller should fully
 *		setup the ibdev for DMA. This usually means using dma_virt_ops.
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 *
 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
 * asynchronously then the device pointer may become freed as soon as this
 * function returns.
 */
int ib_register_device(struct ib_device *device, const char *name,
		       struct device *dma_device)
{
	int ret;

	ret = assign_name(device, name);
	if (ret)
		return ret;

	/*
	 * If the caller does not provide a DMA capable device then the IB core
	 * will set up ib_sge and scatterlist structures that stash the kernel
	 * virtual address into the address field.
	 */
	WARN_ON(dma_device && !dma_device->dma_parms);
	device->dma_device = dma_device;

	ret = setup_device(device);
	if (ret)
		return ret;

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		return ret;
	}

	device->groups[0] = &ib_dev_attr_group;
	device->groups[1] = device->ops.device_group;
	ret = ib_setup_device_attrs(device);
	if (ret)
		goto cache_cleanup;

	ib_device_register_rdmacg(device);

	rdma_counter_init(device);

	/*
	 * Ensure that an ADD uevent is not fired because it
	 * is too early and the device is not initialized yet.
	 */
	dev_set_uevent_suppress(&device->dev, true);
	ret = device_add(&device->dev);
	if (ret)
		goto cg_cleanup;

	ret = ib_setup_port_attrs(&device->coredev);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto dev_cleanup;
	}

	ret = enable_device_and_get(device);
	if (ret) {
		void (*dealloc_fn)(struct ib_device *);

		/*
		 * If we hit this error flow then we don't want to
		 * automatically dealloc the device since the caller is
		 * expected to call ib_dealloc_device() after
		 * ib_register_device() fails. This is tricky due to the
		 * possibility for a parallel unregistration along with this
		 * error flow. Since we have a refcount here we know any
		 * parallel flow is stopped in disable_device and will see the
		 * special dealloc_driver pointer, causing the responsibility to
		 * ib_dealloc_device() to revert back to this thread.
		 */
		dealloc_fn = device->ops.dealloc_driver;
		device->ops.dealloc_driver = prevent_dealloc_device;
		ib_device_put(device);
		__ib_unregister_device(device);
		device->ops.dealloc_driver = dealloc_fn;
		dev_set_uevent_suppress(&device->dev, false);
		return ret;
	}
	dev_set_uevent_suppress(&device->dev, false);

	ib_device_notify_register(device);

	ib_device_put(device);

	return 0;

dev_cleanup:
	device_del(&device->dev);
cg_cleanup:
	dev_set_uevent_suppress(&device->dev, false);
	ib_device_unregister_rdmacg(device);
cache_cleanup:
	ib_cache_cleanup_one(device);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
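
/*
 * Illustrative sketch of a provider probe path (all names hypothetical); the
 * ops table should satisfy ib_device_check_mandatory() for kverbs support:
 *
 *	mydev = ib_alloc_device(my_ib_dev, ibdev);
 *	if (!mydev)
 *		return -ENOMEM;
 *	mydev->ibdev.phys_port_cnt = 1;
 *	ib_set_device_ops(&mydev->ibdev, &my_dev_ops);
 *	ret = ib_register_device(&mydev->ibdev, "myib_%d", &pdev->dev);
 *	if (ret)
 *		ib_dealloc_device(&mydev->ibdev);
 *	return ret;
 */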

/* Callers must hold a get on the device. */
static void __ib_unregister_device(struct ib_device *ib_dev)
{
	struct ib_device *sub, *tmp;

	mutex_lock(&ib_dev->subdev_lock);
	list_for_each_entry_safe_reverse(sub, tmp,
					 &ib_dev->subdev_list_head,
					 subdev_list) {
		list_del(&sub->subdev_list);
		ib_dev->ops.del_sub_dev(sub);
		ib_device_put(ib_dev);
	}
	mutex_unlock(&ib_dev->subdev_lock);

	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced, once any unregister returns the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
	mutex_lock(&ib_dev->unregistration_lock);
	if (!refcount_read(&ib_dev->refcount))
		goto out;

	disable_device(ib_dev);
	rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);

	/* Expedite removing unregistered pointers from the hash table */
	free_netdevs(ib_dev);

	ib_free_port_attrs(&ib_dev->coredev);
	device_del(&ib_dev->dev);
	ib_device_unregister_rdmacg(ib_dev);
	ib_cache_cleanup_one(ib_dev);

	/*
	 * Drivers using the new flow may not call ib_dealloc_device except
	 * in error unwind prior to registration success.
	 */
	if (ib_dev->ops.dealloc_driver &&
	    ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
		ib_dealloc_device(ib_dev);
	}
out:
	mutex_unlock(&ib_dev->unregistration_lock);
}

/**
 * ib_unregister_device - Unregister an IB device
 * @ib_dev: The device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 *
 * Callers should call this routine only once, and protect against races with
 * registration. Typically it should only be called as part of a remove
 * callback in an implementation of driver core's struct device_driver and
 * related.
 *
 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
 * this function.
 */
void ib_unregister_device(struct ib_device *ib_dev)
{
	get_device(&ib_dev->dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
 *
 * This is the same as ib_unregister_device(), except it includes an internal
 * ib_device_put() that should match a 'get' obtained by the caller.
 *
 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
 * Drivers using this flow MUST use the driver_unregister callback to clean up
 * their resources associated with the device and dealloc it.
 */
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	ib_device_put(ib_dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);

/**
 * ib_unregister_driver - Unregister all IB devices for a driver
 * @driver_id: The driver to unregister
 *
 * This implements a fence for device unregistration. It only returns once all
 * devices associated with the driver_id have fully completed their
 * unregistration and returned from ib_unregister_device*().
 *
 * If devices are not yet unregistered it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id, that
 * is the responsibility of the caller.
 */
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
	struct ib_device *ib_dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, ib_dev) {
		if (ib_dev->ops.driver_id != driver_id)
			continue;

		get_device(&ib_dev->dev);
		up_read(&devices_rwsem);

		WARN_ON(!ib_dev->ops.dealloc_driver);
		__ib_unregister_device(ib_dev);

		put_device(&ib_dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);

static void ib_unregister_work(struct work_struct *work)
{
	struct ib_device *ib_dev =
		container_of(work, struct ib_device, unregistration_work);

	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}

/**
 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
 *
 * This schedules an asynchronous unregistration using a WQ for the device. A
 * driver should use this to avoid holding locks while doing unregistration,
 * such as holding the RTNL lock.
 *
 * Drivers using this API must use ib_unregister_driver before module unload
 * to ensure that all scheduled unregistrations have completed.
 */
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
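
/*
 * Illustrative sketch (hypothetical driver): an event handler that cannot
 * block on unregistration queues it, and module unload fences all queued
 * work through ib_unregister_driver():
 *
 *	ib_unregister_device_queued(&mydev->ibdev);
 *	...
 *	static void __exit my_driver_exit(void)
 *	{
 *		ib_unregister_driver(RDMA_DRIVER_MY);	(driver id hypothetical)
 *	}
 */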
1664
1665 /*
1666 * The caller must pass in a device that has the kref held and the refcount
1667 * released. If the device is in cur_net and still registered then it is moved
1668 * into net.
1669 */
rdma_dev_change_netns(struct ib_device * device,struct net * cur_net,struct net * net)1670 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1671 struct net *net)
1672 {
1673 int ret2 = -EINVAL;
1674 int ret;
1675
1676 mutex_lock(&device->unregistration_lock);
1677
1678 /*
1679 * If a device not under ib_device_get() or if the unregistration_lock
1680 * is not held, the namespace can be changed, or it can be unregistered.
1681 * Check again under the lock.
1682 */
1683 if (refcount_read(&device->refcount) == 0 ||
1684 !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1685 ret = -ENODEV;
1686 goto out;
1687 }
1688
1689 kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1690 disable_device(device);
1691
1692 /*
1693 * At this point no one can be using the device, so it is safe to
1694 * change the namespace.
1695 */
1696 write_pnet(&device->coredev.rdma_net, net);
1697
1698 down_read(&devices_rwsem);
1699 /*
1700 * Currently rdma devices are system wide unique. So the device name
1701 * is guaranteed free in the new namespace. Publish the new namespace
1702 * at the sysfs level.
1703 */
1704 ret = device_rename(&device->dev, dev_name(&device->dev));
1705 up_read(&devices_rwsem);
1706 if (ret) {
1707 dev_warn(&device->dev,
1708 "%s: Couldn't rename device after namespace change\n",
1709 __func__);
1710 /* Try and put things back and re-enable the device */
1711 write_pnet(&device->coredev.rdma_net, cur_net);
1712 }
1713
1714 ret2 = enable_device_and_get(device);
1715 if (ret2) {
1716 /*
1717 * This shouldn't really happen, but if it does, let the user
1718 * retry at later point. So don't disable the device.
1719 */
1720 dev_warn(&device->dev,
1721 "%s: Couldn't re-enable device after namespace change\n",
1722 __func__);
1723 }
1724 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1725
1726 ib_device_put(device);
1727 out:
1728 mutex_unlock(&device->unregistration_lock);
1729 if (ret)
1730 return ret;
1731 return ret2;
1732 }
1733
ib_device_set_netns_put(struct sk_buff * skb,struct ib_device * dev,u32 ns_fd)1734 int ib_device_set_netns_put(struct sk_buff *skb,
1735 struct ib_device *dev, u32 ns_fd)
1736 {
1737 struct net *net;
1738 int ret;
1739
1740 net = get_net_ns_by_fd(ns_fd);
1741 if (IS_ERR(net)) {
1742 ret = PTR_ERR(net);
1743 goto net_err;
1744 }
1745
1746 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1747 ret = -EPERM;
1748 goto ns_err;
1749 }
1750
1751 /*
1752 * All the ib_clients, including uverbs, are reset when the namespace is
1753 * changed and this cannot be blocked waiting for userspace to do
1754 * something, so disassociation is mandatory.
1755 */
1756 if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
1757 ret = -EOPNOTSUPP;
1758 goto ns_err;
1759 }
1760
1761 get_device(&dev->dev);
1762 ib_device_put(dev);
1763 ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
1764 put_device(&dev->dev);
1765
1766 put_net(net);
1767 return ret;
1768
1769 ns_err:
1770 put_net(net);
1771 net_err:
1772 ib_device_put(dev);
1773 return ret;
1774 }
1775
1776 static struct pernet_operations rdma_dev_net_ops = {
1777 .init = rdma_dev_init_net,
1778 .exit = rdma_dev_exit_net,
1779 .id = &rdma_dev_net_id,
1780 .size = sizeof(struct rdma_dev_net),
1781 };
1782
assign_client_id(struct ib_client * client)1783 static int assign_client_id(struct ib_client *client)
1784 {
1785 int ret;
1786
1787 lockdep_assert_held(&clients_rwsem);
1788 /*
1789 * The add/remove callbacks must be called in FIFO/LIFO order. To
1790 * achieve this we assign client_ids so they are sorted in
1791 * registration order.
1792 */
1793 client->client_id = highest_client_id;
1794 ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1795 if (ret)
1796 return ret;
1797
1798 highest_client_id++;
1799 xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1800 return 0;
1801 }
1802
1803 static void remove_client_id(struct ib_client *client)
1804 {
1805 down_write(&clients_rwsem);
1806 xa_erase(&clients, client->client_id);
1807 for (; highest_client_id; highest_client_id--)
1808 if (xa_load(&clients, highest_client_id - 1))
1809 break;
1810 up_write(&clients_rwsem);
1811 }
1812
1813 /**
1814 * ib_register_client - Register an IB client
1815 * @client:Client to register
1816 *
1817 * Upper level users of the IB drivers can use ib_register_client() to
1818 * register callbacks for IB device addition and removal. When an IB
1819 * device is added, each registered client's add method will be called
1820 * (in the order the clients were registered), and when a device is
1821 * removed, each client's remove method will be called (in the reverse
1822 * order that clients were registered). In addition, when
1823 * ib_register_client() is called, the client will receive an add
1824 * callback for all devices already registered.
1825 */
1826 int ib_register_client(struct ib_client *client)
1827 {
1828 struct ib_device *device;
1829 unsigned long index;
1830 bool need_unreg = false;
1831 int ret;
1832
1833 refcount_set(&client->uses, 1);
1834 init_completion(&client->uses_zero);
1835
1836 /*
1837 * The devices_rwsem is held in write mode to ensure that a racing
1838 * ib_register_device() sees a consistent view of clients and devices.
1839 */
1840 down_write(&devices_rwsem);
1841 down_write(&clients_rwsem);
1842 ret = assign_client_id(client);
1843 if (ret)
1844 goto out;
1845
1846 need_unreg = true;
1847 xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1848 ret = add_client_context(device, client);
1849 if (ret)
1850 goto out;
1851 }
1852 ret = 0;
1853 out:
1854 up_write(&clients_rwsem);
1855 up_write(&devices_rwsem);
1856 if (need_unreg && ret)
1857 ib_unregister_client(client);
1858 return ret;
1859 }
1860 EXPORT_SYMBOL(ib_register_client);
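
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * ib_client that is notified as devices come and go. The names my_client,
 * my_add_one() and my_remove_one() are made up for illustration, and error
 * handling is trimmed; kept under #if 0 so it cannot be mistaken for live
 * code.
 */
#if 0
static int my_add_one(struct ib_device *device)
{
	pr_info("my_client: device %s added\n", dev_name(&device->dev));
	return 0;		/* a nonzero return rejects the device */
}

static void my_remove_one(struct ib_device *device, void *client_data)
{
	pr_info("my_client: device %s removed\n", dev_name(&device->dev));
}

static struct ib_client my_client = {
	.name	= "my_client",
	.add	= my_add_one,
	.remove	= my_remove_one,
};

/* Typically called from module_init()/module_exit(): */
static int __init my_init(void)
{
	return ib_register_client(&my_client);
}

static void __exit my_exit(void)
{
	ib_unregister_client(&my_client);
}
#endif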
1861
1862 /**
1863 * ib_unregister_client - Unregister an IB client
1864 * @client:Client to unregister
1865 *
1866 * Upper level users use ib_unregister_client() to remove their client
1867 * registration. When ib_unregister_client() is called, the client
1868 * will receive a remove callback for each IB device still registered.
1869 *
1870 * This is a full fence: once it returns, no client callbacks will be
1871 * called, and none are still running in another thread.
1872 */
1873 void ib_unregister_client(struct ib_client *client)
1874 {
1875 struct ib_device *device;
1876 unsigned long index;
1877
1878 down_write(&clients_rwsem);
1879 ib_client_put(client);
1880 xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1881 up_write(&clients_rwsem);
1882
1883 /* We do not want to have locks while calling client->remove() */
1884 rcu_read_lock();
1885 xa_for_each (&devices, index, device) {
1886 if (!ib_device_try_get(device))
1887 continue;
1888 rcu_read_unlock();
1889
1890 remove_client_context(device, client->client_id);
1891
1892 ib_device_put(device);
1893 rcu_read_lock();
1894 }
1895 rcu_read_unlock();
1896
1897 /*
1898 * remove_client_context() is not a fence, it can return even though a
1899 * removal is ongoing. Wait until all removals are completed.
1900 */
1901 wait_for_completion(&client->uses_zero);
1902 remove_client_id(client);
1903 }
1904 EXPORT_SYMBOL(ib_unregister_client);
1905
1906 static int __ib_get_global_client_nl_info(const char *client_name,
1907 struct ib_client_nl_info *res)
1908 {
1909 struct ib_client *client;
1910 unsigned long index;
1911 int ret = -ENOENT;
1912
1913 down_read(&clients_rwsem);
1914 xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1915 if (strcmp(client->name, client_name) != 0)
1916 continue;
1917 if (!client->get_global_nl_info) {
1918 ret = -EOPNOTSUPP;
1919 break;
1920 }
1921 ret = client->get_global_nl_info(res);
1922 if (WARN_ON(ret == -ENOENT))
1923 ret = -EINVAL;
1924 if (!ret && res->cdev)
1925 get_device(res->cdev);
1926 break;
1927 }
1928 up_read(&clients_rwsem);
1929 return ret;
1930 }
1931
1932 static int __ib_get_client_nl_info(struct ib_device *ibdev,
1933 const char *client_name,
1934 struct ib_client_nl_info *res)
1935 {
1936 unsigned long index;
1937 void *client_data;
1938 int ret = -ENOENT;
1939
1940 down_read(&ibdev->client_data_rwsem);
1941 xan_for_each_marked (&ibdev->client_data, index, client_data,
1942 CLIENT_DATA_REGISTERED) {
1943 struct ib_client *client = xa_load(&clients, index);
1944
1945 if (!client || strcmp(client->name, client_name) != 0)
1946 continue;
1947 if (!client->get_nl_info) {
1948 ret = -EOPNOTSUPP;
1949 break;
1950 }
1951 ret = client->get_nl_info(ibdev, client_data, res);
1952 if (WARN_ON(ret == -ENOENT))
1953 ret = -EINVAL;
1954
1955 /*
1956 * The cdev is guaranteed valid as long as we are inside the
1957 * client_data_rwsem as remove_one can't be called. Keep it
1958 * valid for the caller.
1959 */
1960 if (!ret && res->cdev)
1961 get_device(res->cdev);
1962 break;
1963 }
1964 up_read(&ibdev->client_data_rwsem);
1965
1966 return ret;
1967 }
1968
1969 /**
1970 * ib_get_client_nl_info - Fetch the nl_info from a client
1971 * @ibdev: IB device
1972 * @client_name: Name of the client
1973 * @res: Result of the query
1974 */
1975 int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
1976 struct ib_client_nl_info *res)
1977 {
1978 int ret;
1979
1980 if (ibdev)
1981 ret = __ib_get_client_nl_info(ibdev, client_name, res);
1982 else
1983 ret = __ib_get_global_client_nl_info(client_name, res);
1984 #ifdef CONFIG_MODULES
1985 if (ret == -ENOENT) {
1986 request_module("rdma-client-%s", client_name);
1987 if (ibdev)
1988 ret = __ib_get_client_nl_info(ibdev, client_name, res);
1989 else
1990 ret = __ib_get_global_client_nl_info(client_name, res);
1991 }
1992 #endif
1993 if (ret) {
1994 if (ret == -ENOENT)
1995 return -EOPNOTSUPP;
1996 return ret;
1997 }
1998
1999 if (WARN_ON(!res->cdev))
2000 return -EINVAL;
2001 return 0;
2002 }
2003
2004 /**
2005 * ib_set_client_data - Set IB client context
2006 * @device:Device to set context for
2007 * @client:Client to set context for
2008 * @data:Context to set
2009 *
2010 * ib_set_client_data() sets client context data that can be retrieved with
2011 * ib_get_client_data(). This can only be called while the client is
2012 * registered to the device, once the ib_client remove() callback returns this
2013 * cannot be called.
2014 */
2015 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2016 void *data)
2017 {
2018 void *rc;
2019
2020 if (WARN_ON(IS_ERR(data)))
2021 data = NULL;
2022
2023 rc = xa_store(&device->client_data, client->client_id, data,
2024 GFP_KERNEL);
2025 WARN_ON(xa_is_err(rc));
2026 }
2027 EXPORT_SYMBOL(ib_set_client_data);
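
/*
 * Usage sketch (illustrative, not part of this file): pairing
 * ib_set_client_data() in a client's add callback with the client_data
 * handed back to remove(). struct my_dev_ctx and my_client are
 * hypothetical; the only real rule is that both calls happen between
 * add() and the return of remove().
 */
#if 0
struct my_dev_ctx {
	struct ib_device *ibdev;
};

static int my_add_one(struct ib_device *device)
{
	struct my_dev_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->ibdev = device;
	ib_set_client_data(device, &my_client, ctx);
	return 0;
}

static void my_remove_one(struct ib_device *device, void *client_data)
{
	/* client_data is whatever ib_set_client_data() last stored */
	struct my_dev_ctx *ctx = client_data;

	kfree(ctx);
}
#endif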
2028
2029 /**
2030 * ib_register_event_handler - Register an IB event handler
2031 * @event_handler:Handler to register
2032 *
2033 * ib_register_event_handler() registers an event handler that will be
2034 * called back when asynchronous IB events occur (as defined in
2035 * chapter 11 of the InfiniBand Architecture Specification). This
2036 * callback occurs in workqueue context.
2037 */
2038 void ib_register_event_handler(struct ib_event_handler *event_handler)
2039 {
2040 down_write(&event_handler->device->event_handler_rwsem);
2041 list_add_tail(&event_handler->list,
2042 &event_handler->device->event_handler_list);
2043 up_write(&event_handler->device->event_handler_rwsem);
2044 }
2045 EXPORT_SYMBOL(ib_register_event_handler);
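
/*
 * Usage sketch (illustrative, not part of this file): registering an
 * async event handler via the INIT_IB_EVENT_HANDLER() helper from
 * <rdma/ib_verbs.h>. my_async_handler, my_eh and my_watch_device() are
 * made-up names.
 */
#if 0
static void my_async_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	pr_info("async event %d on %s\n", event->event,
		dev_name(&event->device->dev));
}

static struct ib_event_handler my_eh;

static void my_watch_device(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&my_eh, device, my_async_handler);
	ib_register_event_handler(&my_eh);
	/* ... later: ib_unregister_event_handler(&my_eh); */
}
#endif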
2046
2047 /**
2048 * ib_unregister_event_handler - Unregister an event handler
2049 * @event_handler:Handler to unregister
2050 *
2051 * Unregister an event handler registered with
2052 * ib_register_event_handler().
2053 */
2054 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
2055 {
2056 down_write(&event_handler->device->event_handler_rwsem);
2057 list_del(&event_handler->list);
2058 up_write(&event_handler->device->event_handler_rwsem);
2059 }
2060 EXPORT_SYMBOL(ib_unregister_event_handler);
2061
2062 void ib_dispatch_event_clients(struct ib_event *event)
2063 {
2064 struct ib_event_handler *handler;
2065
2066 down_read(&event->device->event_handler_rwsem);
2067
2068 list_for_each_entry(handler, &event->device->event_handler_list, list)
2069 handler->handler(handler, event);
2070
2071 up_read(&event->device->event_handler_rwsem);
2072 }
2073
2074 static int iw_query_port(struct ib_device *device,
2075 u32 port_num,
2076 struct ib_port_attr *port_attr)
2077 {
2078 struct in_device *inetdev;
2079 struct net_device *netdev;
2080
2081 memset(port_attr, 0, sizeof(*port_attr));
2082
2083 netdev = ib_device_get_netdev(device, port_num);
2084 if (!netdev)
2085 return -ENODEV;
2086
2087 port_attr->max_mtu = IB_MTU_4096;
2088 port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
2089
2090 if (!netif_carrier_ok(netdev)) {
2091 port_attr->state = IB_PORT_DOWN;
2092 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2093 } else {
2094 rcu_read_lock();
2095 inetdev = __in_dev_get_rcu(netdev);
2096
2097 if (inetdev && inetdev->ifa_list) {
2098 port_attr->state = IB_PORT_ACTIVE;
2099 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2100 } else {
2101 port_attr->state = IB_PORT_INIT;
2102 port_attr->phys_state =
2103 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
2104 }
2105
2106 rcu_read_unlock();
2107 }
2108
2109 dev_put(netdev);
2110 return device->ops.query_port(device, port_num, port_attr);
2111 }
2112
2113 static int __ib_query_port(struct ib_device *device,
2114 u32 port_num,
2115 struct ib_port_attr *port_attr)
2116 {
2117 int err;
2118
2119 memset(port_attr, 0, sizeof(*port_attr));
2120
2121 err = device->ops.query_port(device, port_num, port_attr);
2122 if (err || port_attr->subnet_prefix)
2123 return err;
2124
2125 if (rdma_port_get_link_layer(device, port_num) !=
2126 IB_LINK_LAYER_INFINIBAND)
2127 return 0;
2128
2129 ib_get_cached_subnet_prefix(device, port_num,
2130 &port_attr->subnet_prefix);
2131 return 0;
2132 }
2133
2134 /**
2135 * ib_query_port - Query IB port attributes
2136 * @device:Device to query
2137 * @port_num:Port number to query
2138 * @port_attr:Port attributes
2139 *
2140 * ib_query_port() returns the attributes of a port through the
2141 * @port_attr pointer.
2142 */
2143 int ib_query_port(struct ib_device *device,
2144 u32 port_num,
2145 struct ib_port_attr *port_attr)
2146 {
2147 if (!rdma_is_port_valid(device, port_num))
2148 return -EINVAL;
2149
2150 if (rdma_protocol_iwarp(device, port_num))
2151 return iw_query_port(device, port_num, port_attr);
2152 else
2153 return __ib_query_port(device, port_num, port_attr);
2154 }
2155 EXPORT_SYMBOL(ib_query_port);
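
/*
 * Usage sketch (illustrative, not part of this file): querying the
 * attributes of port 1 and reporting the link state. my_show_port() is a
 * made-up name; error handling beyond the return code is omitted.
 */
#if 0
static void my_show_port(struct ib_device *device)
{
	struct ib_port_attr attr;

	if (ib_query_port(device, 1, &attr))
		return;

	pr_info("%s port 1: state %d, active MTU enum %d\n",
		dev_name(&device->dev), attr.state, attr.active_mtu);
}
#endif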
2156
2157 static void add_ndev_hash(struct ib_port_data *pdata)
2158 {
2159 unsigned long flags;
2160
2161 might_sleep();
2162
2163 spin_lock_irqsave(&ndev_hash_lock, flags);
2164 if (hash_hashed(&pdata->ndev_hash_link)) {
2165 hash_del_rcu(&pdata->ndev_hash_link);
2166 spin_unlock_irqrestore(&ndev_hash_lock, flags);
2167 /*
2168 * We cannot do hash_add_rcu after a hash_del_rcu until a
2169 * grace period has elapsed.
2170 */
2171 synchronize_rcu();
2172 spin_lock_irqsave(&ndev_hash_lock, flags);
2173 }
2174 if (pdata->netdev)
2175 hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
2176 (uintptr_t)pdata->netdev);
2177 spin_unlock_irqrestore(&ndev_hash_lock, flags);
2178 }
2179
2180 /**
2181 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
2182 * @ib_dev: Device to modify
2183 * @ndev: net_device to affiliate, may be NULL
2184 * @port: IB port the net_device is connected to
2185 *
2186 * Drivers should use this to link the ib_device to a netdev so the netdev
2187 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
2188 * affiliated with any port.
2189 *
2190 * The caller must ensure that the given ndev is not unregistered or
2191 * unregistering, and that either the ib_device is unregistered or
2192 * ib_device_set_netdev() is called with NULL when the ndev sends a
2193 * NETDEV_UNREGISTER event.
2194 */
2195 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
2196 u32 port)
2197 {
2198 enum rdma_nl_notify_event_type etype;
2199 struct net_device *old_ndev;
2200 struct ib_port_data *pdata;
2201 unsigned long flags;
2202 int ret;
2203
2204 if (!rdma_is_port_valid(ib_dev, port))
2205 return -EINVAL;
2206
2207 /*
2208 * Drivers wish to call this before ib_register_driver, so we have to
2209 * set up the port data early.
2210 */
2211 ret = alloc_port_data(ib_dev);
2212 if (ret)
2213 return ret;
2214
2215 pdata = &ib_dev->port_data[port];
2216 spin_lock_irqsave(&pdata->netdev_lock, flags);
2217 old_ndev = rcu_dereference_protected(
2218 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2219 if (old_ndev == ndev) {
2220 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2221 return 0;
2222 }
2223
2224 rcu_assign_pointer(pdata->netdev, ndev);
2225 netdev_put(old_ndev, &pdata->netdev_tracker);
2226 netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
2227 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2228
2229 add_ndev_hash(pdata);
2230
2231 /* Make sure that the device is registered before we send events */
2232 if (xa_load(&devices, ib_dev->index) != ib_dev)
2233 return 0;
2234
2235 etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
2236 rdma_nl_notify_event(ib_dev, port, etype);
2237
2238 return 0;
2239 }
2240 EXPORT_SYMBOL(ib_device_set_netdev);
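
/*
 * Usage sketch (illustrative, not part of this file): how a driver might
 * pair ib_device_set_netdev() calls as the comment above requires --
 * associate the netdev before registration, and clear the association on
 * NETDEV_UNREGISTER. my_probe() and my_netdev_unregister() are
 * hypothetical driver hooks.
 */
#if 0
static int my_probe(struct ib_device *ibdev, struct net_device *ndev)
{
	/* Associate port 1 with the netdev before ib_register_device() */
	return ib_device_set_netdev(ibdev, ndev, 1);
}

static void my_netdev_unregister(struct ib_device *ibdev)
{
	/* Drop the association; a NULL ndev detaches the port */
	ib_device_set_netdev(ibdev, NULL, 1);
}
#endif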
2241
2242 static void free_netdevs(struct ib_device *ib_dev)
2243 {
2244 unsigned long flags;
2245 u32 port;
2246
2247 if (!ib_dev->port_data)
2248 return;
2249
2250 rdma_for_each_port (ib_dev, port) {
2251 struct ib_port_data *pdata = &ib_dev->port_data[port];
2252 struct net_device *ndev;
2253
2254 spin_lock_irqsave(&pdata->netdev_lock, flags);
2255 ndev = rcu_dereference_protected(
2256 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2257 if (ndev) {
2258 spin_lock(&ndev_hash_lock);
2259 hash_del_rcu(&pdata->ndev_hash_link);
2260 spin_unlock(&ndev_hash_lock);
2261
2262 /*
2263 * If this is the last dev_put there is still a
2264 * synchronize_rcu before the netdev is kfreed, so we
2265 * can continue to rely on unlocked pointer
2266 * comparisons after the put
2267 */
2268 rcu_assign_pointer(pdata->netdev, NULL);
2269 netdev_put(ndev, &pdata->netdev_tracker);
2270 }
2271 spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2272 }
2273 }
2274
2275 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
2276 u32 port)
2277 {
2278 struct ib_port_data *pdata;
2279 struct net_device *res;
2280
2281 if (!rdma_is_port_valid(ib_dev, port))
2282 return NULL;
2283
2284 if (!ib_dev->port_data)
2285 return NULL;
2286
2287 pdata = &ib_dev->port_data[port];
2288
2289 /*
2290 * New drivers should use ib_device_set_netdev() not the legacy
2291 * get_netdev().
2292 */
2293 if (ib_dev->ops.get_netdev)
2294 res = ib_dev->ops.get_netdev(ib_dev, port);
2295 else {
2296 spin_lock(&pdata->netdev_lock);
2297 res = rcu_dereference_protected(
2298 pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2299 dev_hold(res);
2300 spin_unlock(&pdata->netdev_lock);
2301 }
2302
2303 return res;
2304 }
2305 EXPORT_SYMBOL(ib_device_get_netdev);
2306
2307 /**
2308 * ib_query_netdev_port - Query the port number of a net_device
2309 * associated with an ibdev
2310 * @ibdev: IB device
2311 * @ndev: Network device
2312 * @port: IB port the net_device is connected to
2313 */
2314 int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
2315 u32 *port)
2316 {
2317 struct net_device *ib_ndev;
2318 u32 port_num;
2319
2320 rdma_for_each_port(ibdev, port_num) {
2321 ib_ndev = ib_device_get_netdev(ibdev, port_num);
2322 if (ndev == ib_ndev) {
2323 *port = port_num;
2324 dev_put(ib_ndev);
2325 return 0;
2326 }
2327 dev_put(ib_ndev);
2328 }
2329
2330 return -ENOENT;
2331 }
2332 EXPORT_SYMBOL(ib_query_netdev_port);
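
/*
 * Usage sketch (illustrative, not part of this file): mapping a netdev
 * back to the IB port it is attached to. my_print_port() is a made-up
 * name.
 */
#if 0
static void my_print_port(struct ib_device *ibdev, struct net_device *ndev)
{
	u32 port;

	if (!ib_query_netdev_port(ibdev, ndev, &port))
		pr_info("%s is port %u of %s\n", ndev->name, port,
			dev_name(&ibdev->dev));
}
#endif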
2333
2334 /**
2335 * ib_device_get_by_netdev - Find an IB device associated with a netdev
2336 * @ndev: netdev to locate
2337 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2338 *
2339 * Find and hold an ib_device that is associated with a netdev via
2340 * ib_device_set_netdev(). The caller must call ib_device_put() on the
2341 * returned pointer.
2342 */
2343 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2344 enum rdma_driver_id driver_id)
2345 {
2346 struct ib_device *res = NULL;
2347 struct ib_port_data *cur;
2348
2349 rcu_read_lock();
2350 hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2351 (uintptr_t)ndev) {
2352 if (rcu_access_pointer(cur->netdev) == ndev &&
2353 (driver_id == RDMA_DRIVER_UNKNOWN ||
2354 cur->ib_dev->ops.driver_id == driver_id) &&
2355 ib_device_try_get(cur->ib_dev)) {
2356 res = cur->ib_dev;
2357 break;
2358 }
2359 }
2360 rcu_read_unlock();
2361
2362 return res;
2363 }
2364 EXPORT_SYMBOL(ib_device_get_by_netdev);
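
/*
 * Usage sketch (illustrative, not part of this file): looking up the
 * ib_device behind a netdev. The returned device holds a reference, so
 * every successful lookup must be balanced by ib_device_put().
 * my_inspect() is a made-up name.
 */
#if 0
static void my_inspect(struct net_device *ndev)
{
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
	if (!ibdev)
		return;

	pr_info("%s is backed by %s\n", ndev->name, dev_name(&ibdev->dev));
	ib_device_put(ibdev);
}
#endif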
2365
2366 /**
2367 * ib_enum_roce_netdev - enumerate all RoCE ports
2368 * @ib_dev : IB device we want to query
2369 * @filter: Should we call the callback?
2370 * @filter_cookie: Cookie passed to filter
2371 * @cb: Callback to call for each RoCE port found
2372 * @cookie: Cookie passed back to the callback
2373 *
2374 * Enumerates all of the physical RoCE ports of ib_dev that are
2375 * associated with a netdevice, and calls cb() on each port for which
2376 * filter() returns nonzero.
2377 */
2378 void ib_enum_roce_netdev(struct ib_device *ib_dev,
2379 roce_netdev_filter filter,
2380 void *filter_cookie,
2381 roce_netdev_callback cb,
2382 void *cookie)
2383 {
2384 u32 port;
2385
2386 rdma_for_each_port (ib_dev, port)
2387 if (rdma_protocol_roce(ib_dev, port)) {
2388 struct net_device *idev =
2389 ib_device_get_netdev(ib_dev, port);
2390
2391 if (filter(ib_dev, port, idev, filter_cookie))
2392 cb(ib_dev, port, idev, cookie);
2393 dev_put(idev);
2394 }
2395 }
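
/*
 * Usage sketch (illustrative, not part of this file): a filter/callback
 * pair for ib_enum_roce_netdev(). Assumes the roce_netdev_filter and
 * roce_netdev_callback typedefs from <rdma/ib_verbs.h>; the my_* names
 * are made up.
 */
#if 0
static int my_filter(struct ib_device *device, u32 port,
		     struct net_device *idev, void *cookie)
{
	/* Only ports currently backed by the netdev passed as cookie */
	return idev == cookie;
}

static void my_cb(struct ib_device *device, u32 port,
		  struct net_device *idev, void *cookie)
{
	pr_info("%s port %u matches\n", dev_name(&device->dev), port);
}

/* Called as: ib_enum_roce_netdev(ib_dev, my_filter, ndev, my_cb, NULL); */
#endif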
2396
2397 /**
2398 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2399 * @filter: Should we call the callback?
2400 * @filter_cookie: Cookie passed to filter
2401 * @cb: Callback to call for each RoCE port found
2402 * @cookie: Cookie passed back to the callback
2403 *
2404 * Enumerates the physical ports of all RoCE devices that are
2405 * associated with a netdevice, and calls cb() on each port for which
2406 * filter() returns nonzero.
2407 */
2408 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
2409 void *filter_cookie,
2410 roce_netdev_callback cb,
2411 void *cookie)
2412 {
2413 struct ib_device *dev;
2414 unsigned long index;
2415
2416 down_read(&devices_rwsem);
2417 xa_for_each_marked(&devices, index, dev, DEVICE_GID_UPDATES)
2418 ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
2419 up_read(&devices_rwsem);
2420 }
2421
2422 /**
2423 * ib_device_enable_gid_updates - Mark device as ready for GID cache updates
2424 * @device: Device to mark
2425 *
2426 * Called after GID table is allocated and initialized. After this mark is set,
2427 * netdevice event handlers can update the device's GID cache. This allows
2428 * events that arrive during device registration to be processed, avoiding
2429 * stale GID entries when netdev properties change during the device
2430 * registration process.
2431 */
2432 void ib_device_enable_gid_updates(struct ib_device *device)
2433 {
2434 down_write(&devices_rwsem);
2435 xa_set_mark(&devices, device->index, DEVICE_GID_UPDATES);
2436 up_write(&devices_rwsem);
2437 }
2438
2439 /**
2440 * ib_device_disable_gid_updates - Clear the GID updates mark
2441 * @device: Device to unmark
2442 *
2443 * Called before GID table cleanup to prevent event handlers from accessing
2444 * the device while it's being torn down.
2445 */
2446 void ib_device_disable_gid_updates(struct ib_device *device)
2447 {
2448 down_write(&devices_rwsem);
2449 xa_clear_mark(&devices, device->index, DEVICE_GID_UPDATES);
2450 up_write(&devices_rwsem);
2451 }
2452
2453 /*
2454 * ib_enum_all_devs - enumerate all ib_devices
2455 * @nldev_cb: Callback to call for each found ib_device
2456 *
2457 * Enumerates all registered ib_devices and calls nldev_cb() on each one.
2458 */
2459 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
2460 struct netlink_callback *cb)
2461 {
2462 unsigned long index;
2463 struct ib_device *dev;
2464 unsigned int idx = 0;
2465 int ret = 0;
2466
2467 down_read(&devices_rwsem);
2468 xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
2469 if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
2470 continue;
2471
2472 ret = nldev_cb(dev, skb, cb, idx);
2473 if (ret)
2474 break;
2475 idx++;
2476 }
2477 up_read(&devices_rwsem);
2478 return ret;
2479 }
2480
2481 /**
2482 * ib_query_pkey - Get P_Key table entry
2483 * @device:Device to query
2484 * @port_num:Port number to query
2485 * @index:P_Key table index to query
2486 * @pkey:Returned P_Key
2487 *
2488 * ib_query_pkey() fetches the specified P_Key table entry.
2489 */
2490 int ib_query_pkey(struct ib_device *device,
2491 u32 port_num, u16 index, u16 *pkey)
2492 {
2493 if (!rdma_is_port_valid(device, port_num))
2494 return -EINVAL;
2495
2496 if (!device->ops.query_pkey)
2497 return -EOPNOTSUPP;
2498
2499 return device->ops.query_pkey(device, port_num, index, pkey);
2500 }
2501 EXPORT_SYMBOL(ib_query_pkey);
2502
2503 /**
2504 * ib_modify_device - Change IB device attributes
2505 * @device:Device to modify
2506 * @device_modify_mask:Mask of attributes to change
2507 * @device_modify:New attribute values
2508 *
2509 * ib_modify_device() changes a device's attributes as specified by
2510 * the @device_modify_mask and @device_modify structure.
2511 */
2512 int ib_modify_device(struct ib_device *device,
2513 int device_modify_mask,
2514 struct ib_device_modify *device_modify)
2515 {
2516 if (!device->ops.modify_device)
2517 return -EOPNOTSUPP;
2518
2519 return device->ops.modify_device(device, device_modify_mask,
2520 device_modify);
2521 }
2522 EXPORT_SYMBOL(ib_modify_device);
2523
2524 /**
2525 * ib_modify_port - Modifies the attributes for the specified port.
2526 * @device: The device to modify.
2527 * @port_num: The number of the port to modify.
2528 * @port_modify_mask: Mask used to specify which attributes of the port
2529 * to change.
2530 * @port_modify: New attribute values for the port.
2531 *
2532 * ib_modify_port() changes a port's attributes as specified by the
2533 * @port_modify_mask and @port_modify structure.
2534 */
2535 int ib_modify_port(struct ib_device *device,
2536 u32 port_num, int port_modify_mask,
2537 struct ib_port_modify *port_modify)
2538 {
2539 int rc;
2540
2541 if (!rdma_is_port_valid(device, port_num))
2542 return -EINVAL;
2543
2544 if (device->ops.modify_port)
2545 rc = device->ops.modify_port(device, port_num,
2546 port_modify_mask,
2547 port_modify);
2548 else if (rdma_protocol_roce(device, port_num) &&
2549 ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
2550 (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
2551 rc = 0;
2552 else
2553 rc = -EOPNOTSUPP;
2554 return rc;
2555 }
2556 EXPORT_SYMBOL(ib_modify_port);
2557
2558 /**
2559 * ib_find_gid - Returns the port number and GID table index where
2560 * a specified GID value occurs. It searches only ports with an IB link layer.
2561 * @device: The device to query.
2562 * @gid: The GID value to search for.
2563 * @port_num: The port number of the device where the GID value was found.
2564 * @index: The index into the GID table where the GID was found. This
2565 * parameter may be NULL.
2566 */
2567 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2568 u32 *port_num, u16 *index)
2569 {
2570 union ib_gid tmp_gid;
2571 u32 port;
2572 int ret, i;
2573
2574 rdma_for_each_port (device, port) {
2575 if (!rdma_protocol_ib(device, port))
2576 continue;
2577
2578 for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
2579 ++i) {
2580 ret = rdma_query_gid(device, port, i, &tmp_gid);
2581 if (ret)
2582 continue;
2583
2584 if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
2585 *port_num = port;
2586 if (index)
2587 *index = i;
2588 return 0;
2589 }
2590 }
2591 }
2592
2593 return -ENOENT;
2594 }
2595 EXPORT_SYMBOL(ib_find_gid);
2596
2597 /**
2598 * ib_find_pkey - Returns the PKey table index where a specified
2599 * PKey value occurs.
2600 * @device: The device to query.
2601 * @port_num: The port number of the device to search for the PKey.
2602 * @pkey: The PKey value to search for.
2603 * @index: The index into the PKey table where the PKey was found.
2604 */
2605 int ib_find_pkey(struct ib_device *device,
2606 u32 port_num, u16 pkey, u16 *index)
2607 {
2608 int ret, i;
2609 u16 tmp_pkey;
2610 int partial_ix = -1;
2611
2612 for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
2613 ++i) {
2614 ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
2615 if (ret)
2616 return ret;
2617 if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
2618 /* If there is a full-member pkey, take it. */
2619 if (tmp_pkey & 0x8000) {
2620 *index = i;
2621 return 0;
2622 }
2623 if (partial_ix < 0)
2624 partial_ix = i;
2625 }
2626 }
2627
2628 /* No full member; take the limited member if one exists. */
2629 if (partial_ix >= 0) {
2630 *index = partial_ix;
2631 return 0;
2632 }
2633 return -ENOENT;
2634 }
2635 EXPORT_SYMBOL(ib_find_pkey);
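
/*
 * Usage sketch (illustrative, not part of this file): locating the
 * default partition key. Bit 15 distinguishes full membership (set) from
 * limited membership, which is why ib_find_pkey() above prefers
 * full-member entries. my_find_default_pkey() is a made-up name.
 */
#if 0
static int my_find_default_pkey(struct ib_device *device, u32 port_num)
{
	u16 index;
	int ret;

	/* 0xFFFF is the default full-member pkey */
	ret = ib_find_pkey(device, port_num, 0xFFFF, &index);
	if (ret)
		return ret;

	pr_info("default pkey at table index %u\n", index);
	return 0;
}
#endif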
2636
2637 /**
2638 * ib_get_net_dev_by_params() - Return the appropriate net_dev
2639 * for a received CM request
2640 * @dev: An RDMA device on which the request has been received.
2641 * @port: Port number on the RDMA device.
2642 * @pkey: The Pkey the request came on.
2643 * @gid: A GID that the net_dev uses to communicate.
2644 * @addr: Contains the IP address that the request specified as its
2645 * destination.
2646 *
2647 */
2648 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
2649 u32 port,
2650 u16 pkey,
2651 const union ib_gid *gid,
2652 const struct sockaddr *addr)
2653 {
2654 struct net_device *net_dev = NULL;
2655 unsigned long index;
2656 void *client_data;
2657
2658 if (!rdma_protocol_ib(dev, port))
2659 return NULL;
2660
2661 /*
2662 * Holding the read side guarantees that the client will not become
2663 * unregistered while we are calling get_net_dev_by_params()
2664 */
2665 down_read(&dev->client_data_rwsem);
2666 xan_for_each_marked (&dev->client_data, index, client_data,
2667 CLIENT_DATA_REGISTERED) {
2668 struct ib_client *client = xa_load(&clients, index);
2669
2670 if (!client || !client->get_net_dev_by_params)
2671 continue;
2672
2673 net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
2674 addr, client_data);
2675 if (net_dev)
2676 break;
2677 }
2678 up_read(&dev->client_data_rwsem);
2679
2680 return net_dev;
2681 }
2682 EXPORT_SYMBOL(ib_get_net_dev_by_params);
2683
2684 void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
2685 {
2686 struct ib_device_ops *dev_ops = &dev->ops;
2687 #define SET_DEVICE_OP(ptr, name) \
2688 do { \
2689 if (ops->name) \
2690 if (!((ptr)->name)) \
2691 (ptr)->name = ops->name; \
2692 } while (0)
2693
2694 #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
2695
2696 if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
2697 WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
2698 dev_ops->driver_id != ops->driver_id);
2699 dev_ops->driver_id = ops->driver_id;
2700 }
2701 if (ops->owner) {
2702 WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
2703 dev_ops->owner = ops->owner;
2704 }
2705 if (ops->uverbs_abi_ver)
2706 dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;
2707
2708 dev_ops->uverbs_no_driver_id_binding |=
2709 ops->uverbs_no_driver_id_binding;
2710
2711 SET_DEVICE_OP(dev_ops, add_gid);
2712 SET_DEVICE_OP(dev_ops, add_sub_dev);
2713 SET_DEVICE_OP(dev_ops, advise_mr);
2714 SET_DEVICE_OP(dev_ops, alloc_dm);
2715 SET_DEVICE_OP(dev_ops, alloc_dmah);
2716 SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
2717 SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
2718 SET_DEVICE_OP(dev_ops, alloc_mr);
2719 SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
2720 SET_DEVICE_OP(dev_ops, alloc_mw);
2721 SET_DEVICE_OP(dev_ops, alloc_pd);
2722 SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
2723 SET_DEVICE_OP(dev_ops, alloc_ucontext);
2724 SET_DEVICE_OP(dev_ops, alloc_xrcd);
2725 SET_DEVICE_OP(dev_ops, attach_mcast);
2726 SET_DEVICE_OP(dev_ops, check_mr_status);
2727 SET_DEVICE_OP(dev_ops, counter_alloc_stats);
2728 SET_DEVICE_OP(dev_ops, counter_bind_qp);
2729 SET_DEVICE_OP(dev_ops, counter_dealloc);
2730 SET_DEVICE_OP(dev_ops, counter_init);
2731 SET_DEVICE_OP(dev_ops, counter_unbind_qp);
2732 SET_DEVICE_OP(dev_ops, counter_update_stats);
2733 SET_DEVICE_OP(dev_ops, create_ah);
2734 SET_DEVICE_OP(dev_ops, create_counters);
2735 SET_DEVICE_OP(dev_ops, create_cq);
2736 SET_DEVICE_OP(dev_ops, create_cq_umem);
2737 SET_DEVICE_OP(dev_ops, create_flow);
2738 SET_DEVICE_OP(dev_ops, create_qp);
2739 SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
2740 SET_DEVICE_OP(dev_ops, create_srq);
2741 SET_DEVICE_OP(dev_ops, create_user_ah);
2742 SET_DEVICE_OP(dev_ops, create_wq);
2743 SET_DEVICE_OP(dev_ops, dealloc_dm);
2744 SET_DEVICE_OP(dev_ops, dealloc_dmah);
2745 SET_DEVICE_OP(dev_ops, dealloc_driver);
2746 SET_DEVICE_OP(dev_ops, dealloc_mw);
2747 SET_DEVICE_OP(dev_ops, dealloc_pd);
2748 SET_DEVICE_OP(dev_ops, dealloc_ucontext);
2749 SET_DEVICE_OP(dev_ops, dealloc_xrcd);
2750 SET_DEVICE_OP(dev_ops, del_gid);
2751 SET_DEVICE_OP(dev_ops, del_sub_dev);
2752 SET_DEVICE_OP(dev_ops, dereg_mr);
2753 SET_DEVICE_OP(dev_ops, destroy_ah);
2754 SET_DEVICE_OP(dev_ops, destroy_counters);
2755 SET_DEVICE_OP(dev_ops, destroy_cq);
2756 SET_DEVICE_OP(dev_ops, destroy_flow);
2757 SET_DEVICE_OP(dev_ops, destroy_flow_action);
2758 SET_DEVICE_OP(dev_ops, destroy_qp);
2759 SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
2760 SET_DEVICE_OP(dev_ops, destroy_srq);
2761 SET_DEVICE_OP(dev_ops, destroy_wq);
2762 SET_DEVICE_OP(dev_ops, device_group);
2763 SET_DEVICE_OP(dev_ops, detach_mcast);
2764 SET_DEVICE_OP(dev_ops, disassociate_ucontext);
2765 SET_DEVICE_OP(dev_ops, drain_rq);
2766 SET_DEVICE_OP(dev_ops, drain_sq);
2767 SET_DEVICE_OP(dev_ops, enable_driver);
2768 SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
2769 SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
2770 SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
2771 SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
2772 SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
2773 SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
2774 SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
2775 SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
2776 SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
2777 SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
2778 SET_DEVICE_OP(dev_ops, get_dev_fw_str);
2779 SET_DEVICE_OP(dev_ops, get_dma_mr);
2780 SET_DEVICE_OP(dev_ops, get_hw_stats);
2781 SET_DEVICE_OP(dev_ops, get_link_layer);
2782 SET_DEVICE_OP(dev_ops, get_netdev);
2783 SET_DEVICE_OP(dev_ops, get_numa_node);
2784 SET_DEVICE_OP(dev_ops, get_port_immutable);
2785 SET_DEVICE_OP(dev_ops, get_vector_affinity);
2786 SET_DEVICE_OP(dev_ops, get_vf_config);
2787 SET_DEVICE_OP(dev_ops, get_vf_guid);
2788 SET_DEVICE_OP(dev_ops, get_vf_stats);
2789 SET_DEVICE_OP(dev_ops, iw_accept);
2790 SET_DEVICE_OP(dev_ops, iw_add_ref);
2791 SET_DEVICE_OP(dev_ops, iw_connect);
2792 SET_DEVICE_OP(dev_ops, iw_create_listen);
2793 SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2794 SET_DEVICE_OP(dev_ops, iw_get_qp);
2795 SET_DEVICE_OP(dev_ops, iw_reject);
2796 SET_DEVICE_OP(dev_ops, iw_rem_ref);
2797 SET_DEVICE_OP(dev_ops, map_mr_sg);
2798 SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
2799 SET_DEVICE_OP(dev_ops, mmap);
2800 SET_DEVICE_OP(dev_ops, mmap_get_pfns);
2801 SET_DEVICE_OP(dev_ops, mmap_free);
2802 SET_DEVICE_OP(dev_ops, modify_ah);
2803 SET_DEVICE_OP(dev_ops, modify_cq);
2804 SET_DEVICE_OP(dev_ops, modify_device);
2805 SET_DEVICE_OP(dev_ops, modify_hw_stat);
2806 SET_DEVICE_OP(dev_ops, modify_port);
2807 SET_DEVICE_OP(dev_ops, modify_qp);
2808 SET_DEVICE_OP(dev_ops, modify_srq);
2809 SET_DEVICE_OP(dev_ops, modify_wq);
2810 SET_DEVICE_OP(dev_ops, peek_cq);
2811 SET_DEVICE_OP(dev_ops, pgoff_to_mmap_entry);
2812 SET_DEVICE_OP(dev_ops, pre_destroy_cq);
2813 SET_DEVICE_OP(dev_ops, poll_cq);
2814 SET_DEVICE_OP(dev_ops, port_groups);
2815 SET_DEVICE_OP(dev_ops, post_destroy_cq);
2816 SET_DEVICE_OP(dev_ops, post_recv);
2817 SET_DEVICE_OP(dev_ops, post_send);
2818 SET_DEVICE_OP(dev_ops, post_srq_recv);
2819 SET_DEVICE_OP(dev_ops, process_mad);
2820 SET_DEVICE_OP(dev_ops, query_ah);
2821 SET_DEVICE_OP(dev_ops, query_device);
2822 SET_DEVICE_OP(dev_ops, query_gid);
2823 SET_DEVICE_OP(dev_ops, query_pkey);
2824 SET_DEVICE_OP(dev_ops, query_port);
2825 SET_DEVICE_OP(dev_ops, query_port_speed);
2826 SET_DEVICE_OP(dev_ops, query_qp);
2827 SET_DEVICE_OP(dev_ops, query_srq);
2828 SET_DEVICE_OP(dev_ops, query_ucontext);
2829 SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
2830 SET_DEVICE_OP(dev_ops, read_counters);
2831 SET_DEVICE_OP(dev_ops, reg_dm_mr);
2832 SET_DEVICE_OP(dev_ops, reg_user_mr);
2833 SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
2834 SET_DEVICE_OP(dev_ops, req_notify_cq);
2835 SET_DEVICE_OP(dev_ops, rereg_user_mr);
2836 SET_DEVICE_OP(dev_ops, resize_cq);
2837 SET_DEVICE_OP(dev_ops, set_vf_guid);
2838 SET_DEVICE_OP(dev_ops, set_vf_link_state);
2839 SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
2840 SET_DEVICE_OP(dev_ops, report_port_event);
2841
2842 SET_OBJ_SIZE(dev_ops, ib_ah);
2843 SET_OBJ_SIZE(dev_ops, ib_counters);
2844 SET_OBJ_SIZE(dev_ops, ib_cq);
2845 SET_OBJ_SIZE(dev_ops, ib_dmah);
2846 SET_OBJ_SIZE(dev_ops, ib_mw);
2847 SET_OBJ_SIZE(dev_ops, ib_pd);
2848 SET_OBJ_SIZE(dev_ops, ib_qp);
2849 SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
2850 SET_OBJ_SIZE(dev_ops, ib_srq);
2851 SET_OBJ_SIZE(dev_ops, ib_ucontext);
2852 SET_OBJ_SIZE(dev_ops, ib_xrcd);
2853 SET_OBJ_SIZE(dev_ops, rdma_counter);
2854 }
2855 EXPORT_SYMBOL(ib_set_device_ops);
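
/*
 * Usage sketch (illustrative, not part of this file): how a driver
 * typically feeds ib_set_device_ops(). Only ops still unset on the
 * device are filled in, so previously merged defaults are preserved.
 * The my_* callbacks and my_pd type are hypothetical.
 */
#if 0
static const struct ib_device_ops my_dev_ops = {
	.owner		= THIS_MODULE,
	.driver_id	= RDMA_DRIVER_UNKNOWN,	/* a real driver uses its own ID */
	.query_device	= my_query_device,
	.query_port	= my_query_port,

	INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
};

static void my_init_device(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &my_dev_ops);
}
#endif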
2856
2857 int ib_add_sub_device(struct ib_device *parent,
2858 enum rdma_nl_dev_type type,
2859 const char *name)
2860 {
2861 struct ib_device *sub;
2862 int ret = 0;
2863
2864 if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
2865 return -EOPNOTSUPP;
2866
2867 if (!ib_device_try_get(parent))
2868 return -EINVAL;
2869
2870 sub = parent->ops.add_sub_dev(parent, type, name);
2871 if (IS_ERR(sub)) {
2872 ib_device_put(parent);
2873 return PTR_ERR(sub);
2874 }
2875
2876 sub->type = type;
2877 sub->parent = parent;
2878
2879 mutex_lock(&parent->subdev_lock);
2880 list_add_tail(&sub->subdev_list, &parent->subdev_list_head);
2881 mutex_unlock(&parent->subdev_lock);
2882
2883 return ret;
2884 }
2885
2886 int ib_del_sub_device_and_put(struct ib_device *sub)
2887 {
2888 struct ib_device *parent = sub->parent;
2889
2890 if (!parent) {
2891 ib_device_put(sub);
2892 return -EOPNOTSUPP;
2893 }
2894
2895 mutex_lock(&parent->subdev_lock);
2896 list_del(&sub->subdev_list);
2897 mutex_unlock(&parent->subdev_lock);
2898
2899 ib_device_put(sub);
2900 parent->ops.del_sub_dev(sub);
2901 ib_device_put(parent);
2902
2903 return 0;
2904 }
2905
2906 #ifdef CONFIG_INFINIBAND_VIRT_DMA
2907 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
2908 {
2909 struct scatterlist *s;
2910 int i;
2911
2912 for_each_sg(sg, s, nents, i) {
2913 sg_dma_address(s) = (uintptr_t)sg_virt(s);
2914 sg_dma_len(s) = s->length;
2915 }
2916 return nents;
2917 }
2918 EXPORT_SYMBOL(ib_dma_virt_map_sg);
2919 #endif /* CONFIG_INFINIBAND_VIRT_DMA */
2920
2921 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
2922 [RDMA_NL_LS_OP_RESOLVE] = {
2923 .doit = ib_nl_handle_resolve_resp,
2924 .flags = RDMA_NL_ADMIN_PERM,
2925 },
2926 [RDMA_NL_LS_OP_SET_TIMEOUT] = {
2927 .doit = ib_nl_handle_set_timeout,
2928 .flags = RDMA_NL_ADMIN_PERM,
2929 },
2930 [RDMA_NL_LS_OP_IP_RESOLVE] = {
2931 .doit = ib_nl_handle_ip_res_resp,
2932 .flags = RDMA_NL_ADMIN_PERM,
2933 },
2934 };
2935
2936 void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
2937 {
2938 enum ib_port_state curr_state;
2939 struct ib_event ibevent = {};
2940 u32 port;
2941
2942 if (ib_query_netdev_port(ibdev, ndev, &port))
2943 return;
2944
2945 curr_state = ib_get_curr_port_state(ndev);
2946
2947 write_lock_irq(&ibdev->cache_lock);
2948 if (ibdev->port_data[port].cache.last_port_state == curr_state) {
2949 write_unlock_irq(&ibdev->cache_lock);
2950 return;
2951 }
2952 ibdev->port_data[port].cache.last_port_state = curr_state;
2953 write_unlock_irq(&ibdev->cache_lock);
2954
2955 ibevent.event = (curr_state == IB_PORT_DOWN) ?
2956 IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
2957 ibevent.device = ibdev;
2958 ibevent.element.port_num = port;
2959 ib_dispatch_event(&ibevent);
2960 }
2961 EXPORT_SYMBOL(ib_dispatch_port_state_event);
2962
2963 static void handle_port_event(struct net_device *ndev, unsigned long event)
2964 {
2965 struct ib_device *ibdev;
2966
2967 /* Currently, link events in bonding scenarios are still
2968 * reported by drivers that support bonding.
2969 */
2970 if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
2971 return;
2972
2973 ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
2974 if (!ibdev)
2975 return;
2976
2977 if (ibdev->ops.report_port_event) {
2978 ibdev->ops.report_port_event(ibdev, ndev, event);
2979 goto put_ibdev;
2980 }
2981
2982 ib_dispatch_port_state_event(ibdev, ndev);
2983
2984 put_ibdev:
2985 ib_device_put(ibdev);
2986 }
2987
2988 static int ib_netdevice_event(struct notifier_block *this,
2989 unsigned long event, void *ptr)
2990 {
2991 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2992 struct ib_device *ibdev;
2993 u32 port;
2994
2995 switch (event) {
2996 case NETDEV_CHANGENAME:
2997 ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
2998 if (!ibdev)
2999 return NOTIFY_DONE;
3000
3001 if (ib_query_netdev_port(ibdev, ndev, &port)) {
3002 ib_device_put(ibdev);
3003 break;
3004 }
3005
3006 rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
3007 ib_device_put(ibdev);
3008 break;
3009
3010 case NETDEV_UP:
3011 case NETDEV_CHANGE:
3012 case NETDEV_DOWN:
3013 handle_port_event(ndev, event);
3014 break;
3015
3016 default:
3017 break;
3018 }
3019
3020 return NOTIFY_DONE;
3021 }
3022
3023 static struct notifier_block nb_netdevice = {
3024 .notifier_call = ib_netdevice_event,
3025 };
3026
3027 static int __init ib_core_init(void)
3028 {
3029 int ret = -ENOMEM;
3030
3031 ib_wq = alloc_workqueue("infiniband", WQ_PERCPU, 0);
3032 if (!ib_wq)
3033 return -ENOMEM;
3034
3035 ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
3036 WQ_UNBOUND_MAX_ACTIVE);
3037 if (!ib_unreg_wq)
3038 goto err;
3039
3040 ib_comp_wq = alloc_workqueue("ib-comp-wq",
3041 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS | WQ_PERCPU, 0);
3042 if (!ib_comp_wq)
3043 goto err_unbound;
3044
3045 ib_comp_unbound_wq =
3046 alloc_workqueue("ib-comp-unb-wq",
3047 WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
3048 WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
3049 if (!ib_comp_unbound_wq)
3050 goto err_comp;
3051
3052 ret = class_register(&ib_class);
3053 if (ret) {
3054 pr_warn("Couldn't create InfiniBand device class\n");
3055 goto err_comp_unbound;
3056 }
3057
3058 rdma_nl_init();
3059
3060 ret = addr_init();
3061 if (ret) {
3062 pr_warn("Couldn't init IB address resolution\n");
3063 goto err_ibnl;
3064 }
3065
3066 ret = ib_mad_init();
3067 if (ret) {
3068 pr_warn("Couldn't init IB MAD\n");
3069 goto err_addr;
3070 }
3071
3072 ret = ib_sa_init();
3073 if (ret) {
3074 pr_warn("Couldn't init SA\n");
3075 goto err_mad;
3076 }
3077
3078 ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
3079 if (ret) {
3080 pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
3081 goto err_sa;
3082 }
3083
3084 ret = register_pernet_device(&rdma_dev_net_ops);
3085 if (ret) {
3086 pr_warn("Couldn't init compat dev. ret %d\n", ret);
3087 goto err_compat;
3088 }
3089
3090 nldev_init();
3091 rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
3092 ret = roce_gid_mgmt_init();
3093 if (ret) {
3094 pr_warn("Couldn't init RoCE GID management\n");
3095 goto err_parent;
3096 }
3097
3098 register_netdevice_notifier(&nb_netdevice);
3099
3100 return 0;
3101
3102 err_parent:
3103 rdma_nl_unregister(RDMA_NL_LS);
3104 nldev_exit();
3105 unregister_pernet_device(&rdma_dev_net_ops);
3106 err_compat:
3107 unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
3108 err_sa:
3109 ib_sa_cleanup();
3110 err_mad:
3111 ib_mad_cleanup();
3112 err_addr:
3113 addr_cleanup();
3114 err_ibnl:
3115 class_unregister(&ib_class);
3116 err_comp_unbound:
3117 destroy_workqueue(ib_comp_unbound_wq);
3118 err_comp:
3119 destroy_workqueue(ib_comp_wq);
3120 err_unbound:
3121 destroy_workqueue(ib_unreg_wq);
3122 err:
3123 destroy_workqueue(ib_wq);
3124 return ret;
3125 }
3126
3127 static void __exit ib_core_cleanup(void)
3128 {
3129 unregister_netdevice_notifier(&nb_netdevice);
3130 roce_gid_mgmt_cleanup();
3131 rdma_nl_unregister(RDMA_NL_LS);
3132 nldev_exit();
3133 unregister_pernet_device(&rdma_dev_net_ops);
3134 unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
3135 ib_sa_cleanup();
3136 ib_mad_cleanup();
3137 addr_cleanup();
3138 rdma_nl_exit();
3139 class_unregister(&ib_class);
3140 destroy_workqueue(ib_comp_unbound_wq);
3141 destroy_workqueue(ib_comp_wq);
3142 /* Make sure that any pending umem accounting work is done. */
3143 destroy_workqueue(ib_wq);
3144 destroy_workqueue(ib_unreg_wq);
3145 WARN_ON(!xa_empty(&clients));
3146 WARN_ON(!xa_empty(&devices));
3147 }
3148
3149 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
3150
3151 /* The ib core relies on the netdev stack registering the
3152 * net_ns_type_operations ns kobject type before ib_core initialization.
3153 */
3154 fs_initcall(ib_core_init);
3155 module_exit(ib_core_cleanup);
3156