1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/module.h>
35 #include <linux/string.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/netdevice.h>
41 #include <net/net_namespace.h>
42 #include <linux/security.h>
43 #include <linux/notifier.h>
44 #include <linux/hashtable.h>
45 #include <rdma/rdma_netlink.h>
46 #include <rdma/ib_addr.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/rdma_counter.h>
49 
50 #include "core_priv.h"
51 #include "restrack.h"
52 
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("core kernel InfiniBand API");
55 MODULE_LICENSE("Dual BSD/GPL");
56 
57 struct workqueue_struct *ib_comp_wq;
58 struct workqueue_struct *ib_comp_unbound_wq;
59 struct workqueue_struct *ib_wq;
60 EXPORT_SYMBOL_GPL(ib_wq);
61 static struct workqueue_struct *ib_unreg_wq;
62 
63 /*
64  * Each of the three rwsem locks (devices, clients, client_data) protects the
65  * xarray of the same name. Specifically it allows the caller to assert that
66  * the MARK will/will not be changing under the lock, and for devices and
67  * clients, that the value in the xarray is still a valid pointer. Change of
68  * the MARK is linked to the object state, so holding the lock and testing the
69  * MARK also asserts that the contained object is in a certain state.
70  *
71  * This is used to build a two stage register/unregister flow where objects
72  * can continue to be in the xarray even though they are still in progress to
73  * register/unregister.
74  *
75  * The xarray itself provides additional locking, and restartable iteration,
76  * which is also relied on.
77  *
78  * Locks should not be nested, with the exception of client_data, which is
79  * allowed to nest under the read side of the other two locks.
80  *
81  * The devices_rwsem also protects the device name list, any change or
82  * assignment of device name must also hold the write side to guarantee unique
83  * names.
84  */
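/*
 * For example (a sketch of the pattern used throughout this file), a reader
 * that only cares about fully registered devices can rely on the mark not
 * changing while the read side is held:
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED)
 *		do_something(device);
 *	up_read(&devices_rwsem);
 *
 * do_something() here is only a placeholder; see ib_policy_change_task() for
 * a real user of this pattern.
 */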
85 
86 /*
87  * devices contains devices that have had their names assigned. The
88  * devices may not yet be registered. Users that care about the registration
89  * status need to call ib_device_try_get() on the device to ensure it is
90  * registered, and keep it registered, for the required duration.
91  *
92  */
93 static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
94 static DECLARE_RWSEM(devices_rwsem);
95 #define DEVICE_REGISTERED XA_MARK_1
96 
97 static u32 highest_client_id;
98 #define CLIENT_REGISTERED XA_MARK_1
99 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
100 static DECLARE_RWSEM(clients_rwsem);
101 
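/*
 * Drop one 'uses' reference on the client. When the last reference is gone
 * complete uses_zero so that ib_unregister_client() can finish waiting for
 * all remove callbacks to be done.
 */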
102 static void ib_client_put(struct ib_client *client)
103 {
104 	if (refcount_dec_and_test(&client->uses))
105 		complete(&client->uses_zero);
106 }
107 
108 /*
109  * If client_data is registered then the corresponding client must also still
110  * be registered.
111  */
112 #define CLIENT_DATA_REGISTERED XA_MARK_1
113 
114 unsigned int rdma_dev_net_id;
115 
116 /*
117  * A list of net namespaces is maintained in an xarray. This is necessary
118  * because we can't get the locking right using the existing net ns list. We
119  * would require an init_net callback after the list is updated.
120  */
121 static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
122 /*
123  * rwsem to protect accessing the rdma_nets xarray entries.
124  */
125 static DECLARE_RWSEM(rdma_nets_rwsem);
126 
127 bool ib_devices_shared_netns = true;
128 module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
129 MODULE_PARM_DESC(netns_mode,
130 		 "Share device among net namespaces; default=1 (shared)");
131 /**
132  * rdma_dev_access_netns() - Return whether an rdma device can be accessed
133  *			     from a specified net namespace or not.
134  * @dev:	Pointer to rdma device which needs to be checked
135  * @net:	Pointer to net namespace for which access is to be checked
136  *
137  * When the rdma device is in shared mode, it ignores the net namespace.
138  * When the rdma device is exclusive to a net namespace, rdma device net
139  * namespace is checked against the specified one.
140  */
141 bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
142 {
143 	return (ib_devices_shared_netns ||
144 		net_eq(read_pnet(&dev->coredev.rdma_net), net));
145 }
146 EXPORT_SYMBOL(rdma_dev_access_netns);
147 
148 /*
149  * xarray has this behavior where it won't iterate over NULL values stored in
150  * allocated arrays.  So we need our own iterator to see all values stored in
151  * the array. This does the same thing as xa_for_each except that it also
152  * returns NULL valued entries if the array is allocating. Simplified to only
153  * work on simple xarrays.
154  */
155 static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
156 			     xa_mark_t filter)
157 {
158 	XA_STATE(xas, xa, *indexp);
159 	void *entry;
160 
161 	rcu_read_lock();
162 	do {
163 		entry = xas_find_marked(&xas, ULONG_MAX, filter);
164 		if (xa_is_zero(entry))
165 			break;
166 	} while (xas_retry(&xas, entry));
167 	rcu_read_unlock();
168 
169 	if (entry) {
170 		*indexp = xas.xa_index;
171 		if (xa_is_zero(entry))
172 			return NULL;
173 		return entry;
174 	}
175 	return XA_ERROR(-ENOENT);
176 }
177 #define xan_for_each_marked(xa, index, entry, filter)                          \
178 	for (index = 0, entry = xan_find_marked(xa, &(index), filter);         \
179 	     !xa_is_err(entry);                                                \
180 	     (index)++, entry = xan_find_marked(xa, &(index), filter))
181 
182 /* RCU hash table mapping netdevice pointers to struct ib_port_data */
183 static DEFINE_SPINLOCK(ndev_hash_lock);
184 static DECLARE_HASHTABLE(ndev_hash, 5);
185 
186 static void free_netdevs(struct ib_device *ib_dev);
187 static void ib_unregister_work(struct work_struct *work);
188 static void __ib_unregister_device(struct ib_device *device);
189 static int ib_security_change(struct notifier_block *nb, unsigned long event,
190 			      void *lsm_data);
191 static void ib_policy_change_task(struct work_struct *work);
192 static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
193 
194 static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
195 			   struct va_format *vaf)
196 {
197 	if (ibdev && ibdev->dev.parent)
198 		dev_printk_emit(level[1] - '0',
199 				ibdev->dev.parent,
200 				"%s %s %s: %pV",
201 				dev_driver_string(ibdev->dev.parent),
202 				dev_name(ibdev->dev.parent),
203 				dev_name(&ibdev->dev),
204 				vaf);
205 	else if (ibdev)
206 		printk("%s%s: %pV",
207 		       level, dev_name(&ibdev->dev), vaf);
208 	else
209 		printk("%s(NULL ib_device): %pV", level, vaf);
210 }
211 
212 #define define_ibdev_printk_level(func, level)                  \
213 void func(const struct ib_device *ibdev, const char *fmt, ...)  \
214 {                                                               \
215 	struct va_format vaf;                                   \
216 	va_list args;                                           \
217 								\
218 	va_start(args, fmt);                                    \
219 								\
220 	vaf.fmt = fmt;                                          \
221 	vaf.va = &args;                                         \
222 								\
223 	__ibdev_printk(level, ibdev, &vaf);                     \
224 								\
225 	va_end(args);                                           \
226 }                                                               \
227 EXPORT_SYMBOL(func);
228 
229 define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
230 define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
231 define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
232 define_ibdev_printk_level(ibdev_err, KERN_ERR);
233 define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
234 define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
235 define_ibdev_printk_level(ibdev_info, KERN_INFO);
236 
237 static struct notifier_block ibdev_lsm_nb = {
238 	.notifier_call = ib_security_change,
239 };
240 
241 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
242 				 struct net *net);
243 
244 /* The rcu_head is placed at the start of the ib_port_data array allocation */
245 struct ib_port_data_rcu {
246 	struct rcu_head rcu_head;
247 	struct ib_port_data pdata[];
248 };
249 
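/*
 * Check that the driver implements every op that kernel verbs consumers
 * require. If any mandatory op is missing the device is still usable, but
 * kverbs_provider is cleared so kernel-only clients are not attached to it.
 */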
250 static void ib_device_check_mandatory(struct ib_device *device)
251 {
252 #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
253 	static const struct {
254 		size_t offset;
255 		char  *name;
256 	} mandatory_table[] = {
257 		IB_MANDATORY_FUNC(query_device),
258 		IB_MANDATORY_FUNC(query_port),
259 		IB_MANDATORY_FUNC(alloc_pd),
260 		IB_MANDATORY_FUNC(dealloc_pd),
261 		IB_MANDATORY_FUNC(create_qp),
262 		IB_MANDATORY_FUNC(modify_qp),
263 		IB_MANDATORY_FUNC(destroy_qp),
264 		IB_MANDATORY_FUNC(post_send),
265 		IB_MANDATORY_FUNC(post_recv),
266 		IB_MANDATORY_FUNC(create_cq),
267 		IB_MANDATORY_FUNC(destroy_cq),
268 		IB_MANDATORY_FUNC(poll_cq),
269 		IB_MANDATORY_FUNC(req_notify_cq),
270 		IB_MANDATORY_FUNC(get_dma_mr),
271 		IB_MANDATORY_FUNC(reg_user_mr),
272 		IB_MANDATORY_FUNC(dereg_mr),
273 		IB_MANDATORY_FUNC(get_port_immutable)
274 	};
275 	int i;
276 
277 	device->kverbs_provider = true;
278 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
279 		if (!*(void **) ((void *) &device->ops +
280 				 mandatory_table[i].offset)) {
281 			device->kverbs_provider = false;
282 			break;
283 		}
284 	}
285 }
286 
287 /*
288  * Caller must perform ib_device_put() to release the device reference count
289  * when ib_device_get_by_index() returns a valid device pointer.
290  */
291 struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
292 {
293 	struct ib_device *device;
294 
295 	down_read(&devices_rwsem);
296 	device = xa_load(&devices, index);
297 	if (device) {
298 		if (!rdma_dev_access_netns(device, net)) {
299 			device = NULL;
300 			goto out;
301 		}
302 
303 		if (!ib_device_try_get(device))
304 			device = NULL;
305 	}
306 out:
307 	up_read(&devices_rwsem);
308 	return device;
309 }
310 
311 /**
312  * ib_device_put - Release IB device reference
313  * @device: device whose reference to be released
314  *
315  * ib_device_put() releases the reference to the IB device to allow it to be
316  * unregistered and eventually freed.
317  */
318 void ib_device_put(struct ib_device *device)
319 {
320 	if (refcount_dec_and_test(&device->refcount))
321 		complete(&device->unreg_completion);
322 }
323 EXPORT_SYMBOL(ib_device_put);
324 
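/*
 * Look up a device by name. The caller must hold devices_rwsem; no reference
 * is taken on the returned device.
 */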
325 static struct ib_device *__ib_device_get_by_name(const char *name)
326 {
327 	struct ib_device *device;
328 	unsigned long index;
329 
330 	xa_for_each (&devices, index, device)
331 		if (!strcmp(name, dev_name(&device->dev)))
332 			return device;
333 
334 	return NULL;
335 }
336 
337 /**
338  * ib_device_get_by_name - Find an IB device by name
339  * @name: The name to look for
340  * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
341  *
342  * Find and hold an ib_device by its name. The caller must call
343  * ib_device_put() on the returned pointer.
344  */
345 struct ib_device *ib_device_get_by_name(const char *name,
346 					enum rdma_driver_id driver_id)
347 {
348 	struct ib_device *device;
349 
350 	down_read(&devices_rwsem);
351 	device = __ib_device_get_by_name(name);
352 	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
353 	    device->ops.driver_id != driver_id)
354 		device = NULL;
355 
356 	if (device) {
357 		if (!ib_device_try_get(device))
358 			device = NULL;
359 	}
360 	up_read(&devices_rwsem);
361 	return device;
362 }
363 EXPORT_SYMBOL(ib_device_get_by_name);
364 
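/* Propagate a rename of the device to its compat devices in other namespaces */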
365 static int rename_compat_devs(struct ib_device *device)
366 {
367 	struct ib_core_device *cdev;
368 	unsigned long index;
369 	int ret = 0;
370 
371 	mutex_lock(&device->compat_devs_mutex);
372 	xa_for_each (&device->compat_devs, index, cdev) {
373 		ret = device_rename(&cdev->dev, dev_name(&device->dev));
374 		if (ret) {
375 			dev_warn(&cdev->dev,
376 				 "Failed to rename compat device to new name %s\n",
377 				 dev_name(&device->dev));
378 			break;
379 		}
380 	}
381 	mutex_unlock(&device->compat_devs_mutex);
382 	return ret;
383 }
384 
385 int ib_device_rename(struct ib_device *ibdev, const char *name)
386 {
387 	unsigned long index;
388 	void *client_data;
389 	int ret;
390 
391 	down_write(&devices_rwsem);
392 	if (!strcmp(name, dev_name(&ibdev->dev))) {
393 		up_write(&devices_rwsem);
394 		return 0;
395 	}
396 
397 	if (__ib_device_get_by_name(name)) {
398 		up_write(&devices_rwsem);
399 		return -EEXIST;
400 	}
401 
402 	ret = device_rename(&ibdev->dev, name);
403 	if (ret) {
404 		up_write(&devices_rwsem);
405 		return ret;
406 	}
407 
408 	strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
409 	ret = rename_compat_devs(ibdev);
410 
411 	downgrade_write(&devices_rwsem);
412 	down_read(&ibdev->client_data_rwsem);
413 	xan_for_each_marked(&ibdev->client_data, index, client_data,
414 			    CLIENT_DATA_REGISTERED) {
415 		struct ib_client *client = xa_load(&clients, index);
416 
417 		if (!client || !client->rename)
418 			continue;
419 
420 		client->rename(ibdev, client_data);
421 	}
422 	up_read(&ibdev->client_data_rwsem);
423 	rdma_nl_notify_event(ibdev, 0, RDMA_RENAME_EVENT);
424 	up_read(&devices_rwsem);
425 	return 0;
426 }
427 
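/*
 * Set whether CQs created on this device use adaptive moderation (DIM).
 * Only 0 (disabled) and 1 (enabled) are accepted.
 */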
428 int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
429 {
430 	if (use_dim > 1)
431 		return -EINVAL;
432 	ibdev->use_cq_dim = use_dim;
433 
434 	return 0;
435 }
436 
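/*
 * Resolve a printf-style name template (e.g. "mlx5_%d") into a unique device
 * name by collecting the indexes already used by existing devices in an IDA
 * and then picking the lowest free one.
 */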
437 static int alloc_name(struct ib_device *ibdev, const char *name)
438 {
439 	struct ib_device *device;
440 	unsigned long index;
441 	struct ida inuse;
442 	int rc;
443 	int i;
444 
445 	lockdep_assert_held_write(&devices_rwsem);
446 	ida_init(&inuse);
447 	xa_for_each (&devices, index, device) {
448 		char buf[IB_DEVICE_NAME_MAX];
449 
450 		if (sscanf(dev_name(&device->dev), name, &i) != 1)
451 			continue;
452 		if (i < 0 || i >= INT_MAX)
453 			continue;
454 		snprintf(buf, sizeof buf, name, i);
455 		if (strcmp(buf, dev_name(&device->dev)) != 0)
456 			continue;
457 
458 		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
459 		if (rc < 0)
460 			goto out;
461 	}
462 
463 	rc = ida_alloc(&inuse, GFP_KERNEL);
464 	if (rc < 0)
465 		goto out;
466 
467 	rc = dev_set_name(&ibdev->dev, name, rc);
468 out:
469 	ida_destroy(&inuse);
470 	return rc;
471 }
472 
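/*
 * Final release of the underlying struct device, called once the last
 * put_device() is done. Frees the cache, security and counter state that was
 * set up per port and then the ib_device itself via kfree_rcu.
 */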
473 static void ib_device_release(struct device *device)
474 {
475 	struct ib_device *dev = container_of(device, struct ib_device, dev);
476 
477 	free_netdevs(dev);
478 	WARN_ON(refcount_read(&dev->refcount));
479 	if (dev->hw_stats_data)
480 		ib_device_release_hw_stats(dev->hw_stats_data);
481 	if (dev->port_data) {
482 		ib_cache_release_one(dev);
483 		ib_security_release_port_pkey_list(dev);
484 		rdma_counter_release(dev);
485 		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
486 				       pdata[0]),
487 			  rcu_head);
488 	}
489 
490 	mutex_destroy(&dev->subdev_lock);
491 	mutex_destroy(&dev->unregistration_lock);
492 	mutex_destroy(&dev->compat_devs_mutex);
493 
494 	xa_destroy(&dev->compat_devs);
495 	xa_destroy(&dev->client_data);
496 	kfree_rcu(dev, rcu_head);
497 }
498 
499 static int ib_device_uevent(const struct device *device,
500 			    struct kobj_uevent_env *env)
501 {
502 	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
503 		return -ENOMEM;
504 
505 	/*
506 	 * It would be nice to pass the node GUID with the event...
507 	 */
508 
509 	return 0;
510 }
511 
512 static const void *net_namespace(const struct device *d)
513 {
514 	const struct ib_core_device *coredev =
515 			container_of(d, struct ib_core_device, dev);
516 
517 	return read_pnet(&coredev->rdma_net);
518 }
519 
520 static struct class ib_class = {
521 	.name    = "infiniband",
522 	.dev_release = ib_device_release,
523 	.dev_uevent = ib_device_uevent,
524 	.ns_type = &net_ns_type_operations,
525 	.namespace = net_namespace,
526 };
527 
528 static void rdma_init_coredev(struct ib_core_device *coredev,
529 			      struct ib_device *dev, struct net *net)
530 {
531 	bool is_full_dev = &dev->coredev == coredev;
532 
533 	/* This BUILD_BUG_ON is intended to catch a layout change of the
534 	 * union of ib_core_device and device.
535 	 * dev must be the first element as ib_core and provider
536 	 * drivers use it. Adding anything in ib_core_device before
537 	 * device will break this assumption.
538 	 */
539 	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
540 		     offsetof(struct ib_device, dev));
541 
542 	coredev->dev.class = &ib_class;
543 	coredev->dev.groups = dev->groups;
544 
545 	/*
546 	 * Don't expose hw counters outside of the init namespace.
547 	 */
548 	if (!is_full_dev && dev->hw_stats_attr_index)
549 		coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
550 
551 	device_initialize(&coredev->dev);
552 	coredev->owner = dev;
553 	INIT_LIST_HEAD(&coredev->port_list);
554 	write_pnet(&coredev->rdma_net, net);
555 }
556 
557 /**
558  * _ib_alloc_device - allocate an IB device struct
559  * @size:size of structure to allocate
560  *
561  * Low-level drivers should use ib_alloc_device() to allocate &struct
562  * ib_device.  @size is the size of the structure to be allocated,
563  * including any private data used by the low-level driver.
564  * ib_dealloc_device() must be used to free structures allocated with
565  * ib_alloc_device().
566  */
567 struct ib_device *_ib_alloc_device(size_t size)
568 {
569 	struct ib_device *device;
570 	unsigned int i;
571 
572 	if (WARN_ON(size < sizeof(struct ib_device)))
573 		return NULL;
574 
575 	device = kzalloc(size, GFP_KERNEL);
576 	if (!device)
577 		return NULL;
578 
579 	if (rdma_restrack_init(device)) {
580 		kfree(device);
581 		return NULL;
582 	}
583 
584 	rdma_init_coredev(&device->coredev, device, &init_net);
585 
586 	INIT_LIST_HEAD(&device->event_handler_list);
587 	spin_lock_init(&device->qp_open_list_lock);
588 	init_rwsem(&device->event_handler_rwsem);
589 	mutex_init(&device->unregistration_lock);
590 	/*
591 	 * client_data needs to be an allocating xarray because we don't want our
592 	 * mark to be destroyed if the user stores NULL in the client data.
593 	 */
594 	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
595 	init_rwsem(&device->client_data_rwsem);
596 	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
597 	mutex_init(&device->compat_devs_mutex);
598 	init_completion(&device->unreg_completion);
599 	INIT_WORK(&device->unregistration_work, ib_unregister_work);
600 
601 	spin_lock_init(&device->cq_pools_lock);
602 	for (i = 0; i < ARRAY_SIZE(device->cq_pools); i++)
603 		INIT_LIST_HEAD(&device->cq_pools[i]);
604 
605 	rwlock_init(&device->cache_lock);
606 
607 	device->uverbs_cmd_mask =
608 		BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
609 		BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD) |
610 		BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST) |
611 		BIT_ULL(IB_USER_VERBS_CMD_CLOSE_XRCD) |
612 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH) |
613 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
614 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ) |
615 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP) |
616 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ) |
617 		BIT_ULL(IB_USER_VERBS_CMD_CREATE_XSRQ) |
618 		BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
619 		BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD) |
620 		BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR) |
621 		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH) |
622 		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ) |
623 		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
624 		BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ) |
625 		BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST) |
626 		BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) |
627 		BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP) |
628 		BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ) |
629 		BIT_ULL(IB_USER_VERBS_CMD_OPEN_QP) |
630 		BIT_ULL(IB_USER_VERBS_CMD_OPEN_XRCD) |
631 		BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE) |
632 		BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT) |
633 		BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
634 		BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ) |
635 		BIT_ULL(IB_USER_VERBS_CMD_REG_MR) |
636 		BIT_ULL(IB_USER_VERBS_CMD_REREG_MR) |
637 		BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ);
638 
639 	mutex_init(&device->subdev_lock);
640 	INIT_LIST_HEAD(&device->subdev_list_head);
641 	INIT_LIST_HEAD(&device->subdev_list);
642 
643 	return device;
644 }
645 EXPORT_SYMBOL(_ib_alloc_device);
646 
647 /**
648  * ib_dealloc_device - free an IB device struct
649  * @device:structure to free
650  *
651  * Free a structure allocated with ib_alloc_device().
652  */
653 void ib_dealloc_device(struct ib_device *device)
654 {
655 	if (device->ops.dealloc_driver)
656 		device->ops.dealloc_driver(device);
657 
658 	/*
659 	 * ib_unregister_driver() requires all devices to remain in the xarray
660 	 * while their ops are callable. The last op we call is dealloc_driver
661 	 * above.  This is needed to create a fence on op callbacks prior to
662 	 * allowing the driver module to unload.
663 	 */
664 	down_write(&devices_rwsem);
665 	if (xa_load(&devices, device->index) == device)
666 		xa_erase(&devices, device->index);
667 	up_write(&devices_rwsem);
668 
669 	/* Expedite releasing netdev references */
670 	free_netdevs(device);
671 
672 	WARN_ON(!xa_empty(&device->compat_devs));
673 	WARN_ON(!xa_empty(&device->client_data));
674 	WARN_ON(refcount_read(&device->refcount));
675 	rdma_restrack_clean(device);
676 	/* Balances with device_initialize */
677 	put_device(&device->dev);
678 }
679 EXPORT_SYMBOL(ib_dealloc_device);
680 
681 /*
682  * add_client_context() and remove_client_context() must be safe against
683  * parallel calls on the same device - registration/unregistration of both the
684  * device and client can be occurring in parallel.
685  *
686  * The routines need to be a fence, any caller must not return until the add
687  * or remove is fully completed.
688  */
689 static int add_client_context(struct ib_device *device,
690 			      struct ib_client *client)
691 {
692 	int ret = 0;
693 
694 	if (!device->kverbs_provider && !client->no_kverbs_req)
695 		return 0;
696 
697 	down_write(&device->client_data_rwsem);
698 	/*
699 	 * So long as the client is registered hold both the client and device
700 	 * unregistration locks.
701 	 */
702 	if (!refcount_inc_not_zero(&client->uses))
703 		goto out_unlock;
704 	refcount_inc(&device->refcount);
705 
706 	/*
707 	 * Another caller to add_client_context got here first and has already
708 	 * completely initialized the context.
709 	 */
710 	if (xa_get_mark(&device->client_data, client->client_id,
711 		    CLIENT_DATA_REGISTERED))
712 		goto out;
713 
714 	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
715 			      GFP_KERNEL));
716 	if (ret)
717 		goto out;
718 	downgrade_write(&device->client_data_rwsem);
719 	if (client->add) {
720 		if (client->add(device)) {
721 			/*
722 			 * If a client fails to add then the error code is
723 			 * ignored, but we won't call any more ops on this
724 			 * client.
725 			 */
726 			xa_erase(&device->client_data, client->client_id);
727 			up_read(&device->client_data_rwsem);
728 			ib_device_put(device);
729 			ib_client_put(client);
730 			return 0;
731 		}
732 	}
733 
734 	/* Readers shall not see a client until add has been completed */
735 	xa_set_mark(&device->client_data, client->client_id,
736 		    CLIENT_DATA_REGISTERED);
737 	up_read(&device->client_data_rwsem);
738 	return 0;
739 
740 out:
741 	ib_device_put(device);
742 	ib_client_put(client);
743 out_unlock:
744 	up_write(&device->client_data_rwsem);
745 	return ret;
746 }
747 
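/*
 * Undo add_client_context() for one client on one device. The
 * CLIENT_DATA_REGISTERED mark is cleared under the write lock, then the
 * client's remove() callback runs without any locks held.
 */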
748 static void remove_client_context(struct ib_device *device,
749 				  unsigned int client_id)
750 {
751 	struct ib_client *client;
752 	void *client_data;
753 
754 	down_write(&device->client_data_rwsem);
755 	if (!xa_get_mark(&device->client_data, client_id,
756 			 CLIENT_DATA_REGISTERED)) {
757 		up_write(&device->client_data_rwsem);
758 		return;
759 	}
760 	client_data = xa_load(&device->client_data, client_id);
761 	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
762 	client = xa_load(&clients, client_id);
763 	up_write(&device->client_data_rwsem);
764 
765 	/*
766 	 * Notice we cannot be holding any exclusive locks when calling the
767 	 * remove callback as the remove callback can recurse back into any
768 	 * public functions in this module and thus try for any locks those
769 	 * functions take.
770 	 *
771 	 * For this reason clients and drivers should not call the
772 	 * unregistration functions while holding any locks.
773 	 */
774 	if (client->remove)
775 		client->remove(device, client_data);
776 
777 	xa_erase(&device->client_data, client_id);
778 	ib_device_put(device);
779 	ib_client_put(client);
780 }
781 
782 static int alloc_port_data(struct ib_device *device)
783 {
784 	struct ib_port_data_rcu *pdata_rcu;
785 	u32 port;
786 
787 	if (device->port_data)
788 		return 0;
789 
790 	/* This can only be called once the physical port range is defined */
791 	if (WARN_ON(!device->phys_port_cnt))
792 		return -EINVAL;
793 
794 	/* Reserve U32_MAX so the logic to go over all the ports is sane */
795 	if (WARN_ON(device->phys_port_cnt == U32_MAX))
796 		return -EINVAL;
797 
798 	/*
799 	 * device->port_data is indexed directly by the port number to make
800 	 * access to this data as efficient as possible.
801 	 *
802 	 * Therefore port_data is declared as a 1 based array with potential
803 	 * empty slots at the beginning.
804 	 */
805 	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
806 					size_add(rdma_end_port(device), 1)),
807 			    GFP_KERNEL);
808 	if (!pdata_rcu)
809 		return -ENOMEM;
810 	/*
811 	 * The rcu_head is put in front of the port data array and the stored
812 	 * pointer is adjusted since we never need to see that member until
813 	 * kfree_rcu.
814 	 */
815 	device->port_data = pdata_rcu->pdata;
816 
817 	rdma_for_each_port (device, port) {
818 		struct ib_port_data *pdata = &device->port_data[port];
819 
820 		pdata->ib_dev = device;
821 		spin_lock_init(&pdata->pkey_list_lock);
822 		INIT_LIST_HEAD(&pdata->pkey_list);
823 		spin_lock_init(&pdata->netdev_lock);
824 		INIT_HLIST_NODE(&pdata->ndev_hash_link);
825 	}
826 	return 0;
827 }
828 
829 static int verify_immutable(const struct ib_device *dev, u32 port)
830 {
831 	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
832 			    rdma_max_mad_size(dev, port) != 0);
833 }
834 
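/*
 * Allocate the per port data array (if not already done) and fill in each
 * port's immutable data from the driver's get_port_immutable() op.
 */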
835 static int setup_port_data(struct ib_device *device)
836 {
837 	u32 port;
838 	int ret;
839 
840 	ret = alloc_port_data(device);
841 	if (ret)
842 		return ret;
843 
844 	rdma_for_each_port (device, port) {
845 		struct ib_port_data *pdata = &device->port_data[port];
846 
847 		ret = device->ops.get_port_immutable(device, port,
848 						     &pdata->immutable);
849 		if (ret)
850 			return ret;
851 
852 		if (verify_immutable(device, port))
853 			return -EINVAL;
854 	}
855 	return 0;
856 }
857 
858 /**
859  * ib_port_immutable_read() - Read rdma port's immutable data
860  * @dev: IB device
861  * @port: port number whose immutable data to read. It starts with index 1 and
862  *        is valid up to and including rdma_end_port().
863  */
864 const struct ib_port_immutable*
865 ib_port_immutable_read(struct ib_device *dev, unsigned int port)
866 {
867 	WARN_ON(!rdma_is_port_valid(dev, port));
868 	return &dev->port_data[port].immutable;
869 }
870 EXPORT_SYMBOL(ib_port_immutable_read);
871 
872 void ib_get_device_fw_str(struct ib_device *dev, char *str)
873 {
874 	if (dev->ops.get_dev_fw_str)
875 		dev->ops.get_dev_fw_str(dev, str);
876 	else
877 		str[0] = '\0';
878 }
879 EXPORT_SYMBOL(ib_get_device_fw_str);
880 
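/*
 * Runs from ib_policy_change_work after an LSM policy change notification.
 * Re-reads the cached subnet prefix of every port on every registered device
 * and lets the security layer re-check its cached state.
 */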
881 static void ib_policy_change_task(struct work_struct *work)
882 {
883 	struct ib_device *dev;
884 	unsigned long index;
885 
886 	down_read(&devices_rwsem);
887 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
888 		unsigned int i;
889 
890 		rdma_for_each_port (dev, i) {
891 			u64 sp;
892 			ib_get_cached_subnet_prefix(dev, i, &sp);
893 			ib_security_cache_change(dev, i, sp);
894 		}
895 	}
896 	up_read(&devices_rwsem);
897 }
898 
899 static int ib_security_change(struct notifier_block *nb, unsigned long event,
900 			      void *lsm_data)
901 {
902 	if (event != LSM_POLICY_CHANGE)
903 		return NOTIFY_DONE;
904 
905 	schedule_work(&ib_policy_change_work);
906 	ib_mad_agent_security_change();
907 
908 	return NOTIFY_OK;
909 }
910 
911 static void compatdev_release(struct device *dev)
912 {
913 	struct ib_core_device *cdev =
914 		container_of(dev, struct ib_core_device, dev);
915 
916 	kfree(cdev);
917 }
918 
919 static int add_one_compat_dev(struct ib_device *device,
920 			      struct rdma_dev_net *rnet)
921 {
922 	struct ib_core_device *cdev;
923 	int ret;
924 
925 	lockdep_assert_held(&rdma_nets_rwsem);
926 	if (!ib_devices_shared_netns)
927 		return 0;
928 
929 	/*
930 	 * Create and add a compat device in all namespaces other than the one
931 	 * it is currently bound to.
932 	 */
933 	if (net_eq(read_pnet(&rnet->net),
934 		   read_pnet(&device->coredev.rdma_net)))
935 		return 0;
936 
937 	/*
938 	 * The first of init_net() or ib_register_device() to take the
939 	 * compat_devs_mutex wins and gets to add the device. Others will wait
940 	 * for completion here.
941 	 */
942 	mutex_lock(&device->compat_devs_mutex);
943 	cdev = xa_load(&device->compat_devs, rnet->id);
944 	if (cdev) {
945 		ret = 0;
946 		goto done;
947 	}
948 	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
949 	if (ret)
950 		goto done;
951 
952 	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
953 	if (!cdev) {
954 		ret = -ENOMEM;
955 		goto cdev_err;
956 	}
957 
958 	cdev->dev.parent = device->dev.parent;
959 	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
960 	cdev->dev.release = compatdev_release;
961 	ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
962 	if (ret)
963 		goto add_err;
964 
965 	ret = device_add(&cdev->dev);
966 	if (ret)
967 		goto add_err;
968 	ret = ib_setup_port_attrs(cdev);
969 	if (ret)
970 		goto port_err;
971 
972 	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
973 			      cdev, GFP_KERNEL));
974 	if (ret)
975 		goto insert_err;
976 
977 	mutex_unlock(&device->compat_devs_mutex);
978 	return 0;
979 
980 insert_err:
981 	ib_free_port_attrs(cdev);
982 port_err:
983 	device_del(&cdev->dev);
984 add_err:
985 	put_device(&cdev->dev);
986 cdev_err:
987 	xa_release(&device->compat_devs, rnet->id);
988 done:
989 	mutex_unlock(&device->compat_devs_mutex);
990 	return ret;
991 }
992 
993 static void remove_one_compat_dev(struct ib_device *device, u32 id)
994 {
995 	struct ib_core_device *cdev;
996 
997 	mutex_lock(&device->compat_devs_mutex);
998 	cdev = xa_erase(&device->compat_devs, id);
999 	mutex_unlock(&device->compat_devs_mutex);
1000 	if (cdev) {
1001 		ib_free_port_attrs(cdev);
1002 		device_del(&cdev->dev);
1003 		put_device(&cdev->dev);
1004 	}
1005 }
1006 
1007 static void remove_compat_devs(struct ib_device *device)
1008 {
1009 	struct ib_core_device *cdev;
1010 	unsigned long index;
1011 
1012 	xa_for_each (&device->compat_devs, index, cdev)
1013 		remove_one_compat_dev(device, index);
1014 }
1015 
1016 static int add_compat_devs(struct ib_device *device)
1017 {
1018 	struct rdma_dev_net *rnet;
1019 	unsigned long index;
1020 	int ret = 0;
1021 
1022 	lockdep_assert_held(&devices_rwsem);
1023 
1024 	down_read(&rdma_nets_rwsem);
1025 	xa_for_each (&rdma_nets, index, rnet) {
1026 		ret = add_one_compat_dev(device, rnet);
1027 		if (ret)
1028 			break;
1029 	}
1030 	up_read(&rdma_nets_rwsem);
1031 	return ret;
1032 }
1033 
1034 static void remove_all_compat_devs(void)
1035 {
1036 	struct ib_core_device *cdev;
1037 	struct ib_device *dev;
1038 	unsigned long index;
1039 
1040 	down_read(&devices_rwsem);
1041 	xa_for_each (&devices, index, dev) {
1042 		unsigned long c_index = 0;
1043 
1044 		/* Hold nets_rwsem so that any other thread modifying this
1045 		 * system param can sync with this thread.
1046 		 */
1047 		down_read(&rdma_nets_rwsem);
1048 		xa_for_each (&dev->compat_devs, c_index, cdev)
1049 			remove_one_compat_dev(dev, c_index);
1050 		up_read(&rdma_nets_rwsem);
1051 	}
1052 	up_read(&devices_rwsem);
1053 }
1054 
1055 static int add_all_compat_devs(void)
1056 {
1057 	struct rdma_dev_net *rnet;
1058 	struct ib_device *dev;
1059 	unsigned long index;
1060 	int ret = 0;
1061 
1062 	down_read(&devices_rwsem);
1063 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1064 		unsigned long net_index = 0;
1065 
1066 		/* Hold nets_rwsem so that any other thread modifying this
1067 		 * system param can sync with this thread.
1068 		 */
1069 		down_read(&rdma_nets_rwsem);
1070 		xa_for_each (&rdma_nets, net_index, rnet) {
1071 			ret = add_one_compat_dev(dev, rnet);
1072 			if (ret)
1073 				break;
1074 		}
1075 		up_read(&rdma_nets_rwsem);
1076 	}
1077 	up_read(&devices_rwsem);
1078 	if (ret)
1079 		remove_all_compat_devs();
1080 	return ret;
1081 }
1082 
1083 int rdma_compatdev_set(u8 enable)
1084 {
1085 	struct rdma_dev_net *rnet;
1086 	unsigned long index;
1087 	int ret = 0;
1088 
1089 	down_write(&rdma_nets_rwsem);
1090 	if (ib_devices_shared_netns == enable) {
1091 		up_write(&rdma_nets_rwsem);
1092 		return 0;
1093 	}
1094 
1095 	/* enable/disable of compat devices is not supported
1096 	 * when net namespaces other than the default init_net exist.
1097 	 */
1098 	xa_for_each (&rdma_nets, index, rnet) {
1099 		ret++;
1100 		break;
1101 	}
1102 	if (!ret)
1103 		ib_devices_shared_netns = enable;
1104 	up_write(&rdma_nets_rwsem);
1105 	if (ret)
1106 		return -EBUSY;
1107 
1108 	if (enable)
1109 		ret = add_all_compat_devs();
1110 	else
1111 		remove_all_compat_devs();
1112 	return ret;
1113 }
1114 
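/*
 * Per-net exit handler. Removes this namespace's compat devices from every
 * ib_device, moves any real device still bound to the dying namespace back
 * to init_net, and finally erases the namespace from the rdma_nets xarray.
 */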
1115 static void rdma_dev_exit_net(struct net *net)
1116 {
1117 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
1118 	struct ib_device *dev;
1119 	unsigned long index;
1120 	int ret;
1121 
1122 	down_write(&rdma_nets_rwsem);
1123 	/*
1124 	 * Prevent the ID from being re-used and hide the id from xa_for_each.
1125 	 */
1126 	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
1127 	WARN_ON(ret);
1128 	up_write(&rdma_nets_rwsem);
1129 
1130 	down_read(&devices_rwsem);
1131 	xa_for_each (&devices, index, dev) {
1132 		get_device(&dev->dev);
1133 		/*
1134 		 * Release the devices_rwsem so that the potentially blocking
1135 		 * device_del() doesn't hold the devices_rwsem for too long.
1136 		 */
1137 		up_read(&devices_rwsem);
1138 
1139 		remove_one_compat_dev(dev, rnet->id);
1140 
1141 		/*
1142 		 * If the real device is in the NS then move it back to init.
1143 		 */
1144 		rdma_dev_change_netns(dev, net, &init_net);
1145 
1146 		put_device(&dev->dev);
1147 		down_read(&devices_rwsem);
1148 	}
1149 	up_read(&devices_rwsem);
1150 
1151 	rdma_nl_net_exit(rnet);
1152 	xa_erase(&rdma_nets, rnet->id);
1153 }
1154 
1155 static __net_init int rdma_dev_init_net(struct net *net)
1156 {
1157 	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
1158 	unsigned long index;
1159 	struct ib_device *dev;
1160 	int ret;
1161 
1162 	write_pnet(&rnet->net, net);
1163 
1164 	ret = rdma_nl_net_init(rnet);
1165 	if (ret)
1166 		return ret;
1167 
1168 	/* No need to create any compat devices in default init_net. */
1169 	if (net_eq(net, &init_net))
1170 		return 0;
1171 
1172 	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
1173 	if (ret) {
1174 		rdma_nl_net_exit(rnet);
1175 		return ret;
1176 	}
1177 
1178 	down_read(&devices_rwsem);
1179 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
1180 		/* Hold nets_rwsem so that netlink command cannot change
1181 		 * system configuration for device sharing mode.
1182 		 */
1183 		down_read(&rdma_nets_rwsem);
1184 		ret = add_one_compat_dev(dev, rnet);
1185 		up_read(&rdma_nets_rwsem);
1186 		if (ret)
1187 			break;
1188 	}
1189 	up_read(&devices_rwsem);
1190 
1191 	if (ret)
1192 		rdma_dev_exit_net(net);
1193 
1194 	return ret;
1195 }
1196 
1197 /*
1198  * Assign the unique string device name and the unique device index. This is
1199  * undone by ib_dealloc_device.
1200  */
1201 static int assign_name(struct ib_device *device, const char *name)
1202 {
1203 	static u32 last_id;
1204 	int ret;
1205 
1206 	down_write(&devices_rwsem);
1207 	/* Assign a unique name to the device */
1208 	if (strchr(name, '%'))
1209 		ret = alloc_name(device, name);
1210 	else
1211 		ret = dev_set_name(&device->dev, name);
1212 	if (ret)
1213 		goto out;
1214 
1215 	if (__ib_device_get_by_name(dev_name(&device->dev))) {
1216 		ret = -ENFILE;
1217 		goto out;
1218 	}
1219 	strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
1220 
1221 	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
1222 			&last_id, GFP_KERNEL);
1223 	if (ret > 0)
1224 		ret = 0;
1225 
1226 out:
1227 	up_write(&devices_rwsem);
1228 	return ret;
1229 }
1230 
1231 /*
1232  * setup_device() allocates memory and sets up data that requires calling the
1233  * device ops, this is the only reason these actions are not done during
1234  * ib_alloc_device. It is undone by ib_dealloc_device().
1235  */
1236 static int setup_device(struct ib_device *device)
1237 {
1238 	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
1239 	int ret;
1240 
1241 	ib_device_check_mandatory(device);
1242 
1243 	ret = setup_port_data(device);
1244 	if (ret) {
1245 		dev_warn(&device->dev, "Couldn't create per-port data\n");
1246 		return ret;
1247 	}
1248 
1249 	memset(&device->attrs, 0, sizeof(device->attrs));
1250 	ret = device->ops.query_device(device, &device->attrs, &uhw);
1251 	if (ret) {
1252 		dev_warn(&device->dev,
1253 			 "Couldn't query the device attributes\n");
1254 		return ret;
1255 	}
1256 
1257 	return 0;
1258 }
1259 
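/*
 * Reverse of enable_device_and_get(): clear DEVICE_REGISTERED, remove the
 * client contexts in LIFO order, wait for the registration refcount to reach
 * zero and only then remove the compat devices.
 */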
1260 static void disable_device(struct ib_device *device)
1261 {
1262 	u32 cid;
1263 
1264 	WARN_ON(!refcount_read(&device->refcount));
1265 
1266 	down_write(&devices_rwsem);
1267 	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
1268 	up_write(&devices_rwsem);
1269 
1270 	/*
1271 	 * Remove clients in LIFO order, see assign_client_id. This could be
1272 	 * more efficient if xarray learns to reverse iterate. Since no new
1273 	 * clients can be added to this ib_device past this point we only need
1274 	 * the maximum possible client_id value here.
1275 	 */
1276 	down_read(&clients_rwsem);
1277 	cid = highest_client_id;
1278 	up_read(&clients_rwsem);
1279 	while (cid) {
1280 		cid--;
1281 		remove_client_context(device, cid);
1282 	}
1283 
1284 	ib_cq_pool_cleanup(device);
1285 
1286 	/* Pairs with refcount_set in enable_device */
1287 	ib_device_put(device);
1288 	wait_for_completion(&device->unreg_completion);
1289 
1290 	/*
1291 	 * compat devices must be removed after device refcount drops to zero.
1292 	 * Otherwise init_net() may add more compatdevs after removing compat
1293 	 * devices and before device is disabled.
1294 	 */
1295 	remove_compat_devs(device);
1296 }
1297 
1298 /*
1299  * An enabled device is visible to all clients and to all the public facing
1300  * APIs that return a device pointer. This always returns with a new get, even
1301  * if it fails.
1302  */
1303 static int enable_device_and_get(struct ib_device *device)
1304 {
1305 	struct ib_client *client;
1306 	unsigned long index;
1307 	int ret = 0;
1308 
1309 	/*
1310 	 * One ref belongs to the xa and the other belongs to this
1311 	 * thread. This is needed to guard against parallel unregistration.
1312 	 */
1313 	refcount_set(&device->refcount, 2);
1314 	down_write(&devices_rwsem);
1315 	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);
1316 
1317 	/*
1318 	 * By using downgrade_write() we ensure that no other thread can clear
1319 	 * DEVICE_REGISTERED while we are completing the client setup.
1320 	 */
1321 	downgrade_write(&devices_rwsem);
1322 
1323 	if (device->ops.enable_driver) {
1324 		ret = device->ops.enable_driver(device);
1325 		if (ret)
1326 			goto out;
1327 	}
1328 
1329 	down_read(&clients_rwsem);
1330 	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1331 		ret = add_client_context(device, client);
1332 		if (ret)
1333 			break;
1334 	}
1335 	up_read(&clients_rwsem);
1336 	if (!ret)
1337 		ret = add_compat_devs(device);
1338 out:
1339 	up_read(&devices_rwsem);
1340 	return ret;
1341 }
1342 
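/*
 * Deliberately empty stand-in for ops.dealloc_driver. It is swapped in on the
 * ib_register_device() error path so that __ib_unregister_device() does not
 * free the device; the caller is still expected to call ib_dealloc_device().
 */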
1343 static void prevent_dealloc_device(struct ib_device *ib_dev)
1344 {
1345 }
1346 
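/*
 * Tell userspace about a newly registered device: fire the KOBJ_ADD uevent
 * and send the RDMA netlink register and per-port netdev attach events.
 */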
1347 static void ib_device_notify_register(struct ib_device *device)
1348 {
1349 	struct net_device *netdev;
1350 	u32 port;
1351 	int ret;
1352 
1353 	down_read(&devices_rwsem);
1354 
1355 	/* Mark for userspace that device is ready */
1356 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1357 
1358 	ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
1359 	if (ret)
1360 		goto out;
1361 
1362 	rdma_for_each_port(device, port) {
1363 		netdev = ib_device_get_netdev(device, port);
1364 		if (!netdev)
1365 			continue;
1366 
1367 		ret = rdma_nl_notify_event(device, port,
1368 					   RDMA_NETDEV_ATTACH_EVENT);
1369 		dev_put(netdev);
1370 		if (ret)
1371 			goto out;
1372 	}
1373 
1374 out:
1375 	up_read(&devices_rwsem);
1376 }
1377 
1378 /**
1379  * ib_register_device - Register an IB device with IB core
1380  * @device: Device to register
1381  * @name: unique string device name. This may include a '%' which will
1382  * 	  cause a unique index to be added to the passed device name.
1383  * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
1384  *	        device will be used. In this case the caller should fully
1385  *		set up the ibdev for DMA. This usually means using dma_virt_ops.
1386  *
1387  * Low-level drivers use ib_register_device() to register their
1388  * devices with the IB core.  All registered clients will receive a
1389  * callback for each device that is added. @device must be allocated
1390  * with ib_alloc_device().
1391  *
1392  * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
1393  * asynchronously then the device pointer may become freed as soon as this
1394  * function returns.
1395  */
1396 int ib_register_device(struct ib_device *device, const char *name,
1397 		       struct device *dma_device)
1398 {
1399 	int ret;
1400 
1401 	ret = assign_name(device, name);
1402 	if (ret)
1403 		return ret;
1404 
1405 	/*
1406 	 * If the caller does not provide a DMA capable device then the IB core
1407 	 * will set up ib_sge and scatterlist structures that stash the kernel
1408 	 * virtual address into the address field.
1409 	 */
1410 	WARN_ON(dma_device && !dma_device->dma_parms);
1411 	device->dma_device = dma_device;
1412 
1413 	ret = setup_device(device);
1414 	if (ret)
1415 		return ret;
1416 
1417 	ret = ib_cache_setup_one(device);
1418 	if (ret) {
1419 		dev_warn(&device->dev,
1420 			 "Couldn't set up InfiniBand P_Key/GID cache\n");
1421 		return ret;
1422 	}
1423 
1424 	device->groups[0] = &ib_dev_attr_group;
1425 	device->groups[1] = device->ops.device_group;
1426 	ret = ib_setup_device_attrs(device);
1427 	if (ret)
1428 		goto cache_cleanup;
1429 
1430 	ib_device_register_rdmacg(device);
1431 
1432 	rdma_counter_init(device);
1433 
1434 	/*
1435 	 * Ensure that the ADD uevent is not fired because it
1436 	 * is too early and the device is not initialized yet.
1437 	 */
1438 	dev_set_uevent_suppress(&device->dev, true);
1439 	ret = device_add(&device->dev);
1440 	if (ret)
1441 		goto cg_cleanup;
1442 
1443 	ret = ib_setup_port_attrs(&device->coredev);
1444 	if (ret) {
1445 		dev_warn(&device->dev,
1446 			 "Couldn't register device with driver model\n");
1447 		goto dev_cleanup;
1448 	}
1449 
1450 	ret = enable_device_and_get(device);
1451 	if (ret) {
1452 		void (*dealloc_fn)(struct ib_device *);
1453 
1454 		/*
1455 		 * If we hit this error flow then we don't want to
1456 		 * automatically dealloc the device since the caller is
1457 		 * expected to call ib_dealloc_device() after
1458 		 * ib_register_device() fails. This is tricky due to the
1459 		 * possibility for a parallel unregistration along with this
1460 		 * error flow. Since we have a refcount here we know any
1461 		 * parallel flow is stopped in disable_device and will see the
1462 		 * special dealloc_driver pointer, causing the responsibility to
1463 		 * ib_dealloc_device() to revert back to this thread.
1464 		 */
1465 		dealloc_fn = device->ops.dealloc_driver;
1466 		device->ops.dealloc_driver = prevent_dealloc_device;
1467 		ib_device_put(device);
1468 		__ib_unregister_device(device);
1469 		device->ops.dealloc_driver = dealloc_fn;
1470 		dev_set_uevent_suppress(&device->dev, false);
1471 		return ret;
1472 	}
1473 	dev_set_uevent_suppress(&device->dev, false);
1474 
1475 	ib_device_notify_register(device);
1476 
1477 	ib_device_put(device);
1478 
1479 	return 0;
1480 
1481 dev_cleanup:
1482 	device_del(&device->dev);
1483 cg_cleanup:
1484 	dev_set_uevent_suppress(&device->dev, false);
1485 	ib_device_unregister_rdmacg(device);
1486 cache_cleanup:
1487 	ib_cache_cleanup_one(device);
1488 	return ret;
1489 }
1490 EXPORT_SYMBOL(ib_register_device);
1491 
1492 /* Callers must hold a get on the device. */
1493 static void __ib_unregister_device(struct ib_device *ib_dev)
1494 {
1495 	struct ib_device *sub, *tmp;
1496 
1497 	mutex_lock(&ib_dev->subdev_lock);
1498 	list_for_each_entry_safe_reverse(sub, tmp,
1499 					 &ib_dev->subdev_list_head,
1500 					 subdev_list) {
1501 		list_del(&sub->subdev_list);
1502 		ib_dev->ops.del_sub_dev(sub);
1503 		ib_device_put(ib_dev);
1504 	}
1505 	mutex_unlock(&ib_dev->subdev_lock);
1506 
1507 	/*
1508 	 * We have a registration lock so that all the calls to unregister are
1509 	 * fully fenced, once any unregister returns the device is truly
1510 	 * unregistered even if multiple callers are unregistering it at the
1511 	 * same time. This also interacts with the registration flow and
1512 	 * provides sane semantics if register and unregister are racing.
1513 	 */
1514 	mutex_lock(&ib_dev->unregistration_lock);
1515 	if (!refcount_read(&ib_dev->refcount))
1516 		goto out;
1517 
1518 	disable_device(ib_dev);
1519 	rdma_nl_notify_event(ib_dev, 0, RDMA_UNREGISTER_EVENT);
1520 
1521 	/* Expedite removing unregistered pointers from the hash table */
1522 	free_netdevs(ib_dev);
1523 
1524 	ib_free_port_attrs(&ib_dev->coredev);
1525 	device_del(&ib_dev->dev);
1526 	ib_device_unregister_rdmacg(ib_dev);
1527 	ib_cache_cleanup_one(ib_dev);
1528 
1529 	/*
1530 	 * Drivers using the new flow may not call ib_dealloc_device except
1531 	 * in error unwind prior to registration success.
1532 	 */
1533 	if (ib_dev->ops.dealloc_driver &&
1534 	    ib_dev->ops.dealloc_driver != prevent_dealloc_device) {
1535 		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
1536 		ib_dealloc_device(ib_dev);
1537 	}
1538 out:
1539 	mutex_unlock(&ib_dev->unregistration_lock);
1540 }
1541 
1542 /**
1543  * ib_unregister_device - Unregister an IB device
1544  * @ib_dev: The device to unregister
1545  *
1546  * Unregister an IB device.  All clients will receive a remove callback.
1547  *
1548  * Callers should call this routine only once, and protect against races with
1549  * registration. Typically it should only be called as part of a remove
1550  * callback in an implementation of driver core's struct device_driver and
1551  * related.
1552  *
1553  * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1554  * this function.
1555  */
1556 void ib_unregister_device(struct ib_device *ib_dev)
1557 {
1558 	get_device(&ib_dev->dev);
1559 	__ib_unregister_device(ib_dev);
1560 	put_device(&ib_dev->dev);
1561 }
1562 EXPORT_SYMBOL(ib_unregister_device);
1563 
1564 /**
1565  * ib_unregister_device_and_put - Unregister a device while holding a 'get'
1566  * @ib_dev: The device to unregister
1567  *
1568  * This is the same as ib_unregister_device(), except it includes an internal
1569  * ib_device_put() that should match a 'get' obtained by the caller.
1570  *
1571  * It is safe to call this routine concurrently from multiple threads while
1572  * holding the 'get'. When the function returns the device is fully
1573  * unregistered.
1574  *
1575  * Drivers using this flow MUST use the driver_unregister callback to clean up
1576  * their resources associated with the device and dealloc it.
1577  */
1578 void ib_unregister_device_and_put(struct ib_device *ib_dev)
1579 {
1580 	WARN_ON(!ib_dev->ops.dealloc_driver);
1581 	get_device(&ib_dev->dev);
1582 	ib_device_put(ib_dev);
1583 	__ib_unregister_device(ib_dev);
1584 	put_device(&ib_dev->dev);
1585 }
1586 EXPORT_SYMBOL(ib_unregister_device_and_put);
1587 
1588 /**
1589  * ib_unregister_driver - Unregister all IB devices for a driver
1590  * @driver_id: The driver to unregister
1591  *
1592  * This implements a fence for device unregistration. It only returns once all
1593  * devices associated with the driver_id have fully completed their
1594  * unregistration and returned from ib_unregister_device*().
1595  *
1596  * If devices are not yet unregistered it goes ahead and starts unregistering
1597  * them.
1598  *
1599  * This does not block creation of new devices with the given driver_id, that
1600  * is the responsibility of the caller.
1601  */
1602 void ib_unregister_driver(enum rdma_driver_id driver_id)
1603 {
1604 	struct ib_device *ib_dev;
1605 	unsigned long index;
1606 
1607 	down_read(&devices_rwsem);
1608 	xa_for_each (&devices, index, ib_dev) {
1609 		if (ib_dev->ops.driver_id != driver_id)
1610 			continue;
1611 
1612 		get_device(&ib_dev->dev);
1613 		up_read(&devices_rwsem);
1614 
1615 		WARN_ON(!ib_dev->ops.dealloc_driver);
1616 		__ib_unregister_device(ib_dev);
1617 
1618 		put_device(&ib_dev->dev);
1619 		down_read(&devices_rwsem);
1620 	}
1621 	up_read(&devices_rwsem);
1622 }
1623 EXPORT_SYMBOL(ib_unregister_driver);
1624 
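/*
 * Work handler for ib_unregister_device_queued(). Drops the get_device()
 * reference that was taken when the work was queued.
 */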
1625 static void ib_unregister_work(struct work_struct *work)
1626 {
1627 	struct ib_device *ib_dev =
1628 		container_of(work, struct ib_device, unregistration_work);
1629 
1630 	__ib_unregister_device(ib_dev);
1631 	put_device(&ib_dev->dev);
1632 }
1633 
1634 /**
1635  * ib_unregister_device_queued - Unregister a device using a work queue
1636  * @ib_dev: The device to unregister
1637  *
1638  * This schedules an asynchronous unregistration using a WQ for the device. A
1639  * driver should use this to avoid holding locks while doing unregistration,
1640  * such as holding the RTNL lock.
1641  *
1642  * Drivers using this API must use ib_unregister_driver before module unload
1643  * to ensure that all scheduled unregistrations have completed.
1644  */
1645 void ib_unregister_device_queued(struct ib_device *ib_dev)
1646 {
1647 	WARN_ON(!refcount_read(&ib_dev->refcount));
1648 	WARN_ON(!ib_dev->ops.dealloc_driver);
1649 	get_device(&ib_dev->dev);
1650 	if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
1651 		put_device(&ib_dev->dev);
1652 }
1653 EXPORT_SYMBOL(ib_unregister_device_queued);
1654 
1655 /*
1656  * The caller must pass in a device that has the kref held and the refcount
1657  * released. If the device is in cur_net and still registered then it is moved
1658  * into net.
1659  */
1660 static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
1661 				 struct net *net)
1662 {
1663 	int ret2 = -EINVAL;
1664 	int ret;
1665 
1666 	mutex_lock(&device->unregistration_lock);
1667 
1668 	/*
1669 	 * If the device is not under ib_device_get() or the unregistration_lock
1670 	 * is not held, the namespace can be changed or the device can be
1671 	 * unregistered. Check again under the lock.
1672 	 */
1673 	if (refcount_read(&device->refcount) == 0 ||
1674 	    !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
1675 		ret = -ENODEV;
1676 		goto out;
1677 	}
1678 
1679 	kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
1680 	disable_device(device);
1681 
1682 	/*
1683 	 * At this point no one can be using the device, so it is safe to
1684 	 * change the namespace.
1685 	 */
1686 	write_pnet(&device->coredev.rdma_net, net);
1687 
1688 	down_read(&devices_rwsem);
1689 	/*
1690 	 * Currently rdma devices are system wide unique. So the device name
1691 	 * is guaranteed free in the new namespace. Publish the new namespace
1692 	 * at the sysfs level.
1693 	 */
1694 	ret = device_rename(&device->dev, dev_name(&device->dev));
1695 	up_read(&devices_rwsem);
1696 	if (ret) {
1697 		dev_warn(&device->dev,
1698 			 "%s: Couldn't rename device after namespace change\n",
1699 			 __func__);
1700 		/* Try and put things back and re-enable the device */
1701 		write_pnet(&device->coredev.rdma_net, cur_net);
1702 	}
1703 
1704 	ret2 = enable_device_and_get(device);
1705 	if (ret2) {
1706 		/*
1707 		 * This shouldn't really happen, but if it does, let the user
1708 		 * retry at later point. So don't disable the device.
1709 		 */
1710 		dev_warn(&device->dev,
1711 			 "%s: Couldn't re-enable device after namespace change\n",
1712 			 __func__);
1713 	}
1714 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1715 
1716 	ib_device_put(device);
1717 out:
1718 	mutex_unlock(&device->unregistration_lock);
1719 	if (ret)
1720 		return ret;
1721 	return ret2;
1722 }
1723 
1724 int ib_device_set_netns_put(struct sk_buff *skb,
1725 			    struct ib_device *dev, u32 ns_fd)
1726 {
1727 	struct net *net;
1728 	int ret;
1729 
1730 	net = get_net_ns_by_fd(ns_fd);
1731 	if (IS_ERR(net)) {
1732 		ret = PTR_ERR(net);
1733 		goto net_err;
1734 	}
1735 
1736 	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1737 		ret = -EPERM;
1738 		goto ns_err;
1739 	}
1740 
1741 	/*
1742 	 * All the ib_clients, including uverbs, are reset when the namespace is
1743 	 * changed and this cannot be blocked waiting for userspace to do
1744 	 * something, so disassociation is mandatory.
1745 	 */
1746 	if (!dev->ops.disassociate_ucontext || ib_devices_shared_netns) {
1747 		ret = -EOPNOTSUPP;
1748 		goto ns_err;
1749 	}
1750 
1751 	get_device(&dev->dev);
1752 	ib_device_put(dev);
1753 	ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
1754 	put_device(&dev->dev);
1755 
1756 	put_net(net);
1757 	return ret;
1758 
1759 ns_err:
1760 	put_net(net);
1761 net_err:
1762 	ib_device_put(dev);
1763 	return ret;
1764 }
1765 
1766 static struct pernet_operations rdma_dev_net_ops = {
1767 	.init = rdma_dev_init_net,
1768 	.exit = rdma_dev_exit_net,
1769 	.id = &rdma_dev_net_id,
1770 	.size = sizeof(struct rdma_dev_net),
1771 };
1772 
1773 static int assign_client_id(struct ib_client *client)
1774 {
1775 	int ret;
1776 
1777 	lockdep_assert_held(&clients_rwsem);
1778 	/*
1779 	 * The add/remove callbacks must be called in FIFO/LIFO order. To
1780 	 * achieve this we assign client_ids so they are sorted in
1781 	 * registration order.
1782 	 */
1783 	client->client_id = highest_client_id;
1784 	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
1785 	if (ret)
1786 		return ret;
1787 
1788 	highest_client_id++;
1789 	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
1790 	return 0;
1791 }
1792 
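/*
 * Erase the client and shrink highest_client_id back down to the highest id
 * that is still in use, so that disable_device() does not iterate further
 * than necessary.
 */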
1793 static void remove_client_id(struct ib_client *client)
1794 {
1795 	down_write(&clients_rwsem);
1796 	xa_erase(&clients, client->client_id);
1797 	for (; highest_client_id; highest_client_id--)
1798 		if (xa_load(&clients, highest_client_id - 1))
1799 			break;
1800 	up_write(&clients_rwsem);
1801 }
1802 
1803 /**
1804  * ib_register_client - Register an IB client
1805  * @client:Client to register
1806  *
1807  * Upper level users of the IB drivers can use ib_register_client() to
1808  * register callbacks for IB device addition and removal.  When an IB
1809  * device is added, each registered client's add method will be called
1810  * (in the order the clients were registered), and when a device is
1811  * removed, each client's remove method will be called (in the reverse
1812  * order that clients were registered).  In addition, when
1813  * ib_register_client() is called, the client will receive an add
1814  * callback for all devices already registered.
1815  */
1816 int ib_register_client(struct ib_client *client)
1817 {
1818 	struct ib_device *device;
1819 	unsigned long index;
1820 	bool need_unreg = false;
1821 	int ret;
1822 
1823 	refcount_set(&client->uses, 1);
1824 	init_completion(&client->uses_zero);
1825 
1826 	/*
1827 	 * The devices_rwsem is held in write mode to ensure that a racing
	 * ib_register_device() sees a consistent view of clients and devices.
1829 	 */
1830 	down_write(&devices_rwsem);
1831 	down_write(&clients_rwsem);
1832 	ret = assign_client_id(client);
1833 	if (ret)
1834 		goto out;
1835 
1836 	need_unreg = true;
1837 	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
1838 		ret = add_client_context(device, client);
1839 		if (ret)
1840 			goto out;
1841 	}
1842 	ret = 0;
1843 out:
1844 	up_write(&clients_rwsem);
1845 	up_write(&devices_rwsem);
1846 	if (need_unreg && ret)
1847 		ib_unregister_client(client);
1848 	return ret;
1849 }
1850 EXPORT_SYMBOL(ib_register_client);
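
/*
 * Example (not part of this file): a minimal sketch of an upper-layer module
 * that registers an ib_client. The "demo" name and the demo_add()/
 * demo_remove() callbacks are hypothetical; the ib_client structure and the
 * register/unregister calls are the real API.
 *
 *	static int demo_add(struct ib_device *device)
 *	{
 *		pr_info("demo: %s added\n", dev_name(&device->dev));
 *		return 0;	(returning an error rejects the device)
 *	}
 *
 *	static void demo_remove(struct ib_device *device, void *client_data)
 *	{
 *		pr_info("demo: %s removed\n", dev_name(&device->dev));
 *	}
 *
 *	static struct ib_client demo_client = {
 *		.name	= "demo",
 *		.add	= demo_add,
 *		.remove	= demo_remove,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return ib_register_client(&demo_client);
 *	}
 *
 *	static void __exit demo_exit(void)
 *	{
 *		ib_unregister_client(&demo_client);
 *	}
 */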
1851 
1852 /**
1853  * ib_unregister_client - Unregister an IB client
1854  * @client:Client to unregister
1855  *
1856  * Upper level users use ib_unregister_client() to remove their client
1857  * registration.  When ib_unregister_client() is called, the client
1858  * will receive a remove callback for each IB device still registered.
1859  *
 * This is a full fence: once it returns, no client callbacks will be called
 * or still be running in another thread.
1862  */
1863 void ib_unregister_client(struct ib_client *client)
1864 {
1865 	struct ib_device *device;
1866 	unsigned long index;
1867 
1868 	down_write(&clients_rwsem);
1869 	ib_client_put(client);
1870 	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
1871 	up_write(&clients_rwsem);
1872 
	/* We do not want to hold locks while calling client->remove() */
1874 	rcu_read_lock();
1875 	xa_for_each (&devices, index, device) {
1876 		if (!ib_device_try_get(device))
1877 			continue;
1878 		rcu_read_unlock();
1879 
1880 		remove_client_context(device, client->client_id);
1881 
1882 		ib_device_put(device);
1883 		rcu_read_lock();
1884 	}
1885 	rcu_read_unlock();
1886 
1887 	/*
1888 	 * remove_client_context() is not a fence, it can return even though a
1889 	 * removal is ongoing. Wait until all removals are completed.
1890 	 */
1891 	wait_for_completion(&client->uses_zero);
1892 	remove_client_id(client);
1893 }
1894 EXPORT_SYMBOL(ib_unregister_client);
1895 
1896 static int __ib_get_global_client_nl_info(const char *client_name,
1897 					  struct ib_client_nl_info *res)
1898 {
1899 	struct ib_client *client;
1900 	unsigned long index;
1901 	int ret = -ENOENT;
1902 
1903 	down_read(&clients_rwsem);
1904 	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
1905 		if (strcmp(client->name, client_name) != 0)
1906 			continue;
1907 		if (!client->get_global_nl_info) {
1908 			ret = -EOPNOTSUPP;
1909 			break;
1910 		}
1911 		ret = client->get_global_nl_info(res);
1912 		if (WARN_ON(ret == -ENOENT))
1913 			ret = -EINVAL;
1914 		if (!ret && res->cdev)
1915 			get_device(res->cdev);
1916 		break;
1917 	}
1918 	up_read(&clients_rwsem);
1919 	return ret;
1920 }
1921 
1922 static int __ib_get_client_nl_info(struct ib_device *ibdev,
1923 				   const char *client_name,
1924 				   struct ib_client_nl_info *res)
1925 {
1926 	unsigned long index;
1927 	void *client_data;
1928 	int ret = -ENOENT;
1929 
1930 	down_read(&ibdev->client_data_rwsem);
1931 	xan_for_each_marked (&ibdev->client_data, index, client_data,
1932 			     CLIENT_DATA_REGISTERED) {
1933 		struct ib_client *client = xa_load(&clients, index);
1934 
1935 		if (!client || strcmp(client->name, client_name) != 0)
1936 			continue;
1937 		if (!client->get_nl_info) {
1938 			ret = -EOPNOTSUPP;
1939 			break;
1940 		}
1941 		ret = client->get_nl_info(ibdev, client_data, res);
1942 		if (WARN_ON(ret == -ENOENT))
1943 			ret = -EINVAL;
1944 
1945 		/*
		 * The cdev is guaranteed valid while we hold
		 * client_data_rwsem, since remove_one() cannot be called.
		 * Take a reference to keep it valid for the caller.
1949 		 */
1950 		if (!ret && res->cdev)
1951 			get_device(res->cdev);
1952 		break;
1953 	}
1954 	up_read(&ibdev->client_data_rwsem);
1955 
1956 	return ret;
1957 }
1958 
1959 /**
1960  * ib_get_client_nl_info - Fetch the nl_info from a client
1961  * @ibdev: IB device
1962  * @client_name: Name of the client
1963  * @res: Result of the query
1964  */
1965 int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
1966 			  struct ib_client_nl_info *res)
1967 {
1968 	int ret;
1969 
1970 	if (ibdev)
1971 		ret = __ib_get_client_nl_info(ibdev, client_name, res);
1972 	else
1973 		ret = __ib_get_global_client_nl_info(client_name, res);
1974 #ifdef CONFIG_MODULES
1975 	if (ret == -ENOENT) {
1976 		request_module("rdma-client-%s", client_name);
1977 		if (ibdev)
1978 			ret = __ib_get_client_nl_info(ibdev, client_name, res);
1979 		else
1980 			ret = __ib_get_global_client_nl_info(client_name, res);
1981 	}
1982 #endif
1983 	if (ret) {
1984 		if (ret == -ENOENT)
1985 			return -EOPNOTSUPP;
1986 		return ret;
1987 	}
1988 
1989 	if (WARN_ON(!res->cdev))
1990 		return -EINVAL;
1991 	return 0;
1992 }
1993 
1994 /**
1995  * ib_set_client_data - Set IB client context
1996  * @device:Device to set context for
1997  * @client:Client to set context for
1998  * @data:Context to set
1999  *
2000  * ib_set_client_data() sets client context data that can be retrieved with
2001  * ib_get_client_data(). This can only be called while the client is
2002  * registered to the device, once the ib_client remove() callback returns this
2003  * cannot be called.
2004  */
2005 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
2006 			void *data)
2007 {
2008 	void *rc;
2009 
2010 	if (WARN_ON(IS_ERR(data)))
2011 		data = NULL;
2012 
2013 	rc = xa_store(&device->client_data, client->client_id, data,
2014 		      GFP_KERNEL);
2015 	WARN_ON(xa_is_err(rc));
2016 }
2017 EXPORT_SYMBOL(ib_set_client_data);
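
/*
 * Example (not part of this file): a hedged sketch of per-device client
 * context. struct demo_ctx, demo_client and the callbacks are hypothetical;
 * ib_set_client_data()/ib_get_client_data() are the real API.
 *
 *	static int demo_add(struct ib_device *device)
 *	{
 *		struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		ctx->device = device;
 *		ib_set_client_data(device, &demo_client, ctx);
 *		return 0;
 *	}
 *
 *	static void demo_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 * Other code paths can retrieve the same pointer with
 * ib_get_client_data(device, &demo_client) while the client is registered.
 */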
2018 
2019 /**
2020  * ib_register_event_handler - Register an IB event handler
2021  * @event_handler:Handler to register
2022  *
2023  * ib_register_event_handler() registers an event handler that will be
2024  * called back when asynchronous IB events occur (as defined in
2025  * chapter 11 of the InfiniBand Architecture Specification). This
2026  * callback occurs in workqueue context.
2027  */
2028 void ib_register_event_handler(struct ib_event_handler *event_handler)
2029 {
2030 	down_write(&event_handler->device->event_handler_rwsem);
2031 	list_add_tail(&event_handler->list,
2032 		      &event_handler->device->event_handler_list);
2033 	up_write(&event_handler->device->event_handler_rwsem);
2034 }
2035 EXPORT_SYMBOL(ib_register_event_handler);
2036 
2037 /**
2038  * ib_unregister_event_handler - Unregister an event handler
2039  * @event_handler:Handler to unregister
2040  *
2041  * Unregister an event handler registered with
2042  * ib_register_event_handler().
2043  */
2044 void ib_unregister_event_handler(struct ib_event_handler *event_handler)
2045 {
2046 	down_write(&event_handler->device->event_handler_rwsem);
2047 	list_del(&event_handler->list);
2048 	up_write(&event_handler->device->event_handler_rwsem);
2049 }
2050 EXPORT_SYMBOL(ib_unregister_event_handler);
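
/*
 * Example (not part of this file): a minimal sketch of an asynchronous event
 * handler. demo_event_handler() and demo_handler are hypothetical;
 * INIT_IB_EVENT_HANDLER() and the register/unregister calls are the real API.
 *
 *	static void demo_event_handler(struct ib_event_handler *handler,
 *				       struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %u is active\n",
 *				event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler demo_handler;
 *
 *	Typically from a client's add() callback:
 *
 *	INIT_IB_EVENT_HANDLER(&demo_handler, device, demo_event_handler);
 *	ib_register_event_handler(&demo_handler);
 *
 *	and from its remove() callback:
 *
 *	ib_unregister_event_handler(&demo_handler);
 */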
2051 
2052 void ib_dispatch_event_clients(struct ib_event *event)
2053 {
2054 	struct ib_event_handler *handler;
2055 
2056 	down_read(&event->device->event_handler_rwsem);
2057 
2058 	list_for_each_entry(handler, &event->device->event_handler_list, list)
2059 		handler->handler(handler, event);
2060 
2061 	up_read(&event->device->event_handler_rwsem);
2062 }
2063 
2064 static int iw_query_port(struct ib_device *device,
2065 			   u32 port_num,
2066 			   struct ib_port_attr *port_attr)
2067 {
2068 	struct in_device *inetdev;
2069 	struct net_device *netdev;
2070 
2071 	memset(port_attr, 0, sizeof(*port_attr));
2072 
2073 	netdev = ib_device_get_netdev(device, port_num);
2074 	if (!netdev)
2075 		return -ENODEV;
2076 
2077 	port_attr->max_mtu = IB_MTU_4096;
2078 	port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
2079 
2080 	if (!netif_carrier_ok(netdev)) {
2081 		port_attr->state = IB_PORT_DOWN;
2082 		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
2083 	} else {
2084 		rcu_read_lock();
2085 		inetdev = __in_dev_get_rcu(netdev);
2086 
2087 		if (inetdev && inetdev->ifa_list) {
2088 			port_attr->state = IB_PORT_ACTIVE;
2089 			port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
2090 		} else {
2091 			port_attr->state = IB_PORT_INIT;
2092 			port_attr->phys_state =
2093 				IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
2094 		}
2095 
2096 		rcu_read_unlock();
2097 	}
2098 
2099 	dev_put(netdev);
2100 	return device->ops.query_port(device, port_num, port_attr);
2101 }
2102 
2103 static int __ib_query_port(struct ib_device *device,
2104 			   u32 port_num,
2105 			   struct ib_port_attr *port_attr)
2106 {
2107 	int err;
2108 
2109 	memset(port_attr, 0, sizeof(*port_attr));
2110 
2111 	err = device->ops.query_port(device, port_num, port_attr);
2112 	if (err || port_attr->subnet_prefix)
2113 		return err;
2114 
2115 	if (rdma_port_get_link_layer(device, port_num) !=
2116 	    IB_LINK_LAYER_INFINIBAND)
2117 		return 0;
2118 
2119 	ib_get_cached_subnet_prefix(device, port_num,
2120 				    &port_attr->subnet_prefix);
2121 	return 0;
2122 }
2123 
2124 /**
2125  * ib_query_port - Query IB port attributes
2126  * @device:Device to query
2127  * @port_num:Port number to query
2128  * @port_attr:Port attributes
2129  *
2130  * ib_query_port() returns the attributes of a port through the
2131  * @port_attr pointer.
2132  */
2133 int ib_query_port(struct ib_device *device,
2134 		  u32 port_num,
2135 		  struct ib_port_attr *port_attr)
2136 {
2137 	if (!rdma_is_port_valid(device, port_num))
2138 		return -EINVAL;
2139 
2140 	if (rdma_protocol_iwarp(device, port_num))
2141 		return iw_query_port(device, port_num, port_attr);
2142 	else
2143 		return __ib_query_port(device, port_num, port_attr);
2144 }
2145 EXPORT_SYMBOL(ib_query_port);
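
/*
 * Example (not part of this file): a hedged sketch of querying a port and
 * checking its state; the surrounding function is hypothetical.
 *
 *	struct ib_port_attr attr;
 *	int ret;
 *
 *	ret = ib_query_port(device, port_num, &attr);
 *	if (ret)
 *		return ret;
 *	if (attr.state != IB_PORT_ACTIVE)
 *		return -ENETDOWN;
 *	pr_debug("port %u active, active_mtu enum %d\n", port_num,
 *		 attr.active_mtu);
 */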
2146 
2147 static void add_ndev_hash(struct ib_port_data *pdata)
2148 {
2149 	unsigned long flags;
2150 
2151 	might_sleep();
2152 
2153 	spin_lock_irqsave(&ndev_hash_lock, flags);
2154 	if (hash_hashed(&pdata->ndev_hash_link)) {
2155 		hash_del_rcu(&pdata->ndev_hash_link);
2156 		spin_unlock_irqrestore(&ndev_hash_lock, flags);
2157 		/*
		 * We cannot do hash_add_rcu after a hash_del_rcu until an RCU
		 * grace period has elapsed.
2160 		 */
2161 		synchronize_rcu();
2162 		spin_lock_irqsave(&ndev_hash_lock, flags);
2163 	}
2164 	if (pdata->netdev)
2165 		hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
2166 			     (uintptr_t)pdata->netdev);
2167 	spin_unlock_irqrestore(&ndev_hash_lock, flags);
2168 }
2169 
2170 /**
2171  * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
2172  * @ib_dev: Device to modify
2173  * @ndev: net_device to affiliate, may be NULL
2174  * @port: IB port the net_device is connected to
2175  *
2176  * Drivers should use this to link the ib_device to a netdev so the netdev
2177  * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
2178  * affiliated with any port.
2179  *
2180  * The caller must ensure that the given ndev is not unregistered or
2181  * unregistering, and that either the ib_device is unregistered or
2182  * ib_device_set_netdev() is called with NULL when the ndev sends a
2183  * NETDEV_UNREGISTER event.
2184  */
2185 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
2186 			 u32 port)
2187 {
2188 	enum rdma_nl_notify_event_type etype;
2189 	struct net_device *old_ndev;
2190 	struct ib_port_data *pdata;
2191 	unsigned long flags;
2192 	int ret;
2193 
2194 	if (!rdma_is_port_valid(ib_dev, port))
2195 		return -EINVAL;
2196 
2197 	/*
	 * Drivers may wish to call this before ib_register_device(), so we
	 * have to set up the port data early.
2200 	 */
2201 	ret = alloc_port_data(ib_dev);
2202 	if (ret)
2203 		return ret;
2204 
2205 	pdata = &ib_dev->port_data[port];
2206 	spin_lock_irqsave(&pdata->netdev_lock, flags);
2207 	old_ndev = rcu_dereference_protected(
2208 		pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2209 	if (old_ndev == ndev) {
2210 		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2211 		return 0;
2212 	}
2213 
2214 	rcu_assign_pointer(pdata->netdev, ndev);
2215 	netdev_put(old_ndev, &pdata->netdev_tracker);
2216 	netdev_hold(ndev, &pdata->netdev_tracker, GFP_ATOMIC);
2217 	spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2218 
2219 	add_ndev_hash(pdata);
2220 
2221 	/* Make sure that the device is registered before we send events */
2222 	if (xa_load(&devices, ib_dev->index) != ib_dev)
2223 		return 0;
2224 
2225 	etype = ndev ? RDMA_NETDEV_ATTACH_EVENT : RDMA_NETDEV_DETACH_EVENT;
2226 	rdma_nl_notify_event(ib_dev, port, etype);
2227 
2228 	return 0;
2229 }
2230 EXPORT_SYMBOL(ib_device_set_netdev);
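
/*
 * Example (not part of this file): a hedged sketch of how a RoCE driver might
 * keep the association current; the ibdev/netdev variables are hypothetical.
 *
 *	Once the net_device backing port 1 is known (before or after
 *	ib_register_device()):
 *
 *	ret = ib_device_set_netdev(ibdev, netdev, 1);
 *
 *	On NETDEV_UNREGISTER for that net_device, drop the association:
 *
 *	ib_device_set_netdev(ibdev, NULL, 1);
 */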
2231 
2232 static void free_netdevs(struct ib_device *ib_dev)
2233 {
2234 	unsigned long flags;
2235 	u32 port;
2236 
2237 	if (!ib_dev->port_data)
2238 		return;
2239 
2240 	rdma_for_each_port (ib_dev, port) {
2241 		struct ib_port_data *pdata = &ib_dev->port_data[port];
2242 		struct net_device *ndev;
2243 
2244 		spin_lock_irqsave(&pdata->netdev_lock, flags);
2245 		ndev = rcu_dereference_protected(
2246 			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2247 		if (ndev) {
2248 			spin_lock(&ndev_hash_lock);
2249 			hash_del_rcu(&pdata->ndev_hash_link);
2250 			spin_unlock(&ndev_hash_lock);
2251 
2252 			/*
2253 			 * If this is the last dev_put there is still a
2254 			 * synchronize_rcu before the netdev is kfreed, so we
2255 			 * can continue to rely on unlocked pointer
2256 			 * comparisons after the put
2257 			 */
2258 			rcu_assign_pointer(pdata->netdev, NULL);
2259 			netdev_put(ndev, &pdata->netdev_tracker);
2260 		}
2261 		spin_unlock_irqrestore(&pdata->netdev_lock, flags);
2262 	}
2263 }
2264 
2265 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
2266 					u32 port)
2267 {
2268 	struct ib_port_data *pdata;
2269 	struct net_device *res;
2270 
2271 	if (!rdma_is_port_valid(ib_dev, port))
2272 		return NULL;
2273 
2274 	if (!ib_dev->port_data)
2275 		return NULL;
2276 
2277 	pdata = &ib_dev->port_data[port];
2278 
2279 	/*
2280 	 * New drivers should use ib_device_set_netdev() not the legacy
2281 	 * get_netdev().
2282 	 */
2283 	if (ib_dev->ops.get_netdev)
2284 		res = ib_dev->ops.get_netdev(ib_dev, port);
2285 	else {
2286 		spin_lock(&pdata->netdev_lock);
2287 		res = rcu_dereference_protected(
2288 			pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
2289 		dev_hold(res);
2290 		spin_unlock(&pdata->netdev_lock);
2291 	}
2292 
2293 	return res;
2294 }
2295 EXPORT_SYMBOL(ib_device_get_netdev);
2296 
2297 /**
2298  * ib_query_netdev_port - Query the port number of a net_device
2299  * associated with an ibdev
2300  * @ibdev: IB device
2301  * @ndev: Network device
2302  * @port: IB port the net_device is connected to
2303  */
2304 int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
2305 			 u32 *port)
2306 {
2307 	struct net_device *ib_ndev;
2308 	u32 port_num;
2309 
2310 	rdma_for_each_port(ibdev, port_num) {
2311 		ib_ndev = ib_device_get_netdev(ibdev, port_num);
2312 		if (ndev == ib_ndev) {
2313 			*port = port_num;
2314 			dev_put(ib_ndev);
2315 			return 0;
2316 		}
2317 		dev_put(ib_ndev);
2318 	}
2319 
2320 	return -ENOENT;
2321 }
2322 EXPORT_SYMBOL(ib_query_netdev_port);
2323 
2324 /**
2325  * ib_device_get_by_netdev - Find an IB device associated with a netdev
2326  * @ndev: netdev to locate
2327  * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2328  *
2329  * Find and hold an ib_device that is associated with a netdev via
2330  * ib_device_set_netdev(). The caller must call ib_device_put() on the
2331  * returned pointer.
2332  */
2333 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
2334 					  enum rdma_driver_id driver_id)
2335 {
2336 	struct ib_device *res = NULL;
2337 	struct ib_port_data *cur;
2338 
2339 	rcu_read_lock();
2340 	hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
2341 				    (uintptr_t)ndev) {
2342 		if (rcu_access_pointer(cur->netdev) == ndev &&
2343 		    (driver_id == RDMA_DRIVER_UNKNOWN ||
2344 		     cur->ib_dev->ops.driver_id == driver_id) &&
2345 		    ib_device_try_get(cur->ib_dev)) {
2346 			res = cur->ib_dev;
2347 			break;
2348 		}
2349 	}
2350 	rcu_read_unlock();
2351 
2352 	return res;
2353 }
2354 EXPORT_SYMBOL(ib_device_get_by_netdev);
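
/*
 * Example (not part of this file): a hedged sketch of resolving the IB device
 * behind a netdev. The caller owns a reference on success and must drop it
 * with ib_device_put().
 *
 *	struct ib_device *ibdev;
 *
 *	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
 *	if (!ibdev)
 *		return -ENODEV;
 *	... use ibdev ...
 *	ib_device_put(ibdev);
 */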
2355 
2356 /**
2357  * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev that are associated
 * with a netdevice and calls cb() on each port for which filter() returns
 * non-zero.
2367  */
2368 void ib_enum_roce_netdev(struct ib_device *ib_dev,
2369 			 roce_netdev_filter filter,
2370 			 void *filter_cookie,
2371 			 roce_netdev_callback cb,
2372 			 void *cookie)
2373 {
2374 	u32 port;
2375 
2376 	rdma_for_each_port (ib_dev, port)
2377 		if (rdma_protocol_roce(ib_dev, port)) {
2378 			struct net_device *idev =
2379 				ib_device_get_netdev(ib_dev, port);
2380 
2381 			if (filter(ib_dev, port, idev, filter_cookie))
2382 				cb(ib_dev, port, idev, cookie);
2383 			dev_put(idev);
2384 		}
2385 }
2386 
2387 /**
2388  * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2389  * @filter: Should we call the callback?
2390  * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are associated
 * with a netdevice and calls cb() on each port for which filter() returns
 * non-zero.
2397  */
2398 void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
2399 			      void *filter_cookie,
2400 			      roce_netdev_callback cb,
2401 			      void *cookie)
2402 {
2403 	struct ib_device *dev;
2404 	unsigned long index;
2405 
2406 	down_read(&devices_rwsem);
2407 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
2408 		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
2409 	up_read(&devices_rwsem);
2410 }
2411 
2412 /*
2413  * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
2415  *
2416  * Enumerates all ib_devices and calls callback() on each device.
2417  */
2418 int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
2419 		     struct netlink_callback *cb)
2420 {
2421 	unsigned long index;
2422 	struct ib_device *dev;
2423 	unsigned int idx = 0;
2424 	int ret = 0;
2425 
2426 	down_read(&devices_rwsem);
2427 	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
2428 		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
2429 			continue;
2430 
2431 		ret = nldev_cb(dev, skb, cb, idx);
2432 		if (ret)
2433 			break;
2434 		idx++;
2435 	}
2436 	up_read(&devices_rwsem);
2437 	return ret;
2438 }
2439 
2440 /**
2441  * ib_query_pkey - Get P_Key table entry
2442  * @device:Device to query
2443  * @port_num:Port number to query
2444  * @index:P_Key table index to query
2445  * @pkey:Returned P_Key
2446  *
2447  * ib_query_pkey() fetches the specified P_Key table entry.
2448  */
2449 int ib_query_pkey(struct ib_device *device,
2450 		  u32 port_num, u16 index, u16 *pkey)
2451 {
2452 	if (!rdma_is_port_valid(device, port_num))
2453 		return -EINVAL;
2454 
2455 	if (!device->ops.query_pkey)
2456 		return -EOPNOTSUPP;
2457 
2458 	return device->ops.query_pkey(device, port_num, index, pkey);
2459 }
2460 EXPORT_SYMBOL(ib_query_pkey);
2461 
2462 /**
2463  * ib_modify_device - Change IB device attributes
2464  * @device:Device to modify
2465  * @device_modify_mask:Mask of attributes to change
2466  * @device_modify:New attribute values
2467  *
2468  * ib_modify_device() changes a device's attributes as specified by
2469  * the @device_modify_mask and @device_modify structure.
2470  */
2471 int ib_modify_device(struct ib_device *device,
2472 		     int device_modify_mask,
2473 		     struct ib_device_modify *device_modify)
2474 {
2475 	if (!device->ops.modify_device)
2476 		return -EOPNOTSUPP;
2477 
2478 	return device->ops.modify_device(device, device_modify_mask,
2479 					 device_modify);
2480 }
2481 EXPORT_SYMBOL(ib_modify_device);
2482 
2483 /**
2484  * ib_modify_port - Modifies the attributes for the specified port.
2485  * @device: The device to modify.
2486  * @port_num: The number of the port to modify.
2487  * @port_modify_mask: Mask used to specify which attributes of the port
2488  *   to change.
2489  * @port_modify: New attribute values for the port.
2490  *
2491  * ib_modify_port() changes a port's attributes as specified by the
2492  * @port_modify_mask and @port_modify structure.
2493  */
2494 int ib_modify_port(struct ib_device *device,
2495 		   u32 port_num, int port_modify_mask,
2496 		   struct ib_port_modify *port_modify)
2497 {
2498 	int rc;
2499 
2500 	if (!rdma_is_port_valid(device, port_num))
2501 		return -EINVAL;
2502 
2503 	if (device->ops.modify_port)
2504 		rc = device->ops.modify_port(device, port_num,
2505 					     port_modify_mask,
2506 					     port_modify);
2507 	else if (rdma_protocol_roce(device, port_num) &&
2508 		 ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
2509 		  (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
2510 		rc = 0;
2511 	else
2512 		rc = -EOPNOTSUPP;
2513 	return rc;
2514 }
2515 EXPORT_SYMBOL(ib_modify_port);
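
/*
 * Example (not part of this file): a hedged sketch of advertising CM support
 * on a port, similar to what the CM layer does when it starts listening.
 *
 *	struct ib_port_modify props = {
 *		.set_port_cap_mask = IB_PORT_CM_SUP,
 *	};
 *
 *	ret = ib_modify_port(device, port_num, 0, &props);
 */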
2516 
2517 /**
2518  * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only ports using the IB link
 *   layer.
2520  * @device: The device to query.
2521  * @gid: The GID value to search for.
2522  * @port_num: The port number of the device where the GID value was found.
2523  * @index: The index into the GID table where the GID was found.  This
2524  *   parameter may be NULL.
2525  */
2526 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2527 		u32 *port_num, u16 *index)
2528 {
2529 	union ib_gid tmp_gid;
2530 	u32 port;
2531 	int ret, i;
2532 
2533 	rdma_for_each_port (device, port) {
2534 		if (!rdma_protocol_ib(device, port))
2535 			continue;
2536 
2537 		for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
2538 		     ++i) {
2539 			ret = rdma_query_gid(device, port, i, &tmp_gid);
2540 			if (ret)
2541 				continue;
2542 
2543 			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
2544 				*port_num = port;
2545 				if (index)
2546 					*index = i;
2547 				return 0;
2548 			}
2549 		}
2550 	}
2551 
2552 	return -ENOENT;
2553 }
2554 EXPORT_SYMBOL(ib_find_gid);
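
/*
 * Example (not part of this file): a hedged sketch of locating a GID; the
 * gid variable is assumed to hold the value being searched for.
 *
 *	u32 port_num;
 *	u16 index;
 *
 *	if (!ib_find_gid(device, &gid, &port_num, &index))
 *		pr_debug("gid found at port %u index %u\n", port_num, index);
 */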
2555 
2556 /**
2557  * ib_find_pkey - Returns the PKey table index where a specified
2558  *   PKey value occurs.
2559  * @device: The device to query.
2560  * @port_num: The port number of the device to search for the PKey.
2561  * @pkey: The PKey value to search for.
2562  * @index: The index into the PKey table where the PKey was found.
2563  */
2564 int ib_find_pkey(struct ib_device *device,
2565 		 u32 port_num, u16 pkey, u16 *index)
2566 {
2567 	int ret, i;
2568 	u16 tmp_pkey;
2569 	int partial_ix = -1;
2570 
2571 	for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
2572 	     ++i) {
2573 		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
2574 		if (ret)
2575 			return ret;
2576 		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
2578 			if (tmp_pkey & 0x8000) {
2579 				*index = i;
2580 				return 0;
2581 			}
2582 			if (partial_ix < 0)
2583 				partial_ix = i;
2584 		}
2585 	}
2586 
	/* No full member; if a limited member exists, take it. */
2588 	if (partial_ix >= 0) {
2589 		*index = partial_ix;
2590 		return 0;
2591 	}
2592 	return -ENOENT;
2593 }
2594 EXPORT_SYMBOL(ib_find_pkey);
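
/*
 * Example (not part of this file): a hedged sketch of resolving the table
 * index of the default partition key (0xffff), preferring the full-member
 * entry as described above.
 *
 *	u16 pkey_index;
 *	int ret;
 *
 *	ret = ib_find_pkey(device, port_num, 0xffff, &pkey_index);
 *	if (ret)
 *		return ret;
 */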
2595 
2596 /**
2597  * ib_get_net_dev_by_params() - Return the appropriate net_dev
2598  * for a received CM request
2599  * @dev:	An RDMA device on which the request has been received.
2600  * @port:	Port number on the RDMA device.
2601  * @pkey:	The Pkey the request came on.
2602  * @gid:	A GID that the net_dev uses to communicate.
2603  * @addr:	Contains the IP address that the request specified as its
2604  *		destination.
2605  *
2606  */
2607 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
2608 					    u32 port,
2609 					    u16 pkey,
2610 					    const union ib_gid *gid,
2611 					    const struct sockaddr *addr)
2612 {
2613 	struct net_device *net_dev = NULL;
2614 	unsigned long index;
2615 	void *client_data;
2616 
2617 	if (!rdma_protocol_ib(dev, port))
2618 		return NULL;
2619 
2620 	/*
2621 	 * Holding the read side guarantees that the client will not become
2622 	 * unregistered while we are calling get_net_dev_by_params()
2623 	 */
2624 	down_read(&dev->client_data_rwsem);
2625 	xan_for_each_marked (&dev->client_data, index, client_data,
2626 			     CLIENT_DATA_REGISTERED) {
2627 		struct ib_client *client = xa_load(&clients, index);
2628 
2629 		if (!client || !client->get_net_dev_by_params)
2630 			continue;
2631 
2632 		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
2633 							addr, client_data);
2634 		if (net_dev)
2635 			break;
2636 	}
2637 	up_read(&dev->client_data_rwsem);
2638 
2639 	return net_dev;
2640 }
2641 EXPORT_SYMBOL(ib_get_net_dev_by_params);
2642 
2643 void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
2644 {
2645 	struct ib_device_ops *dev_ops = &dev->ops;
2646 #define SET_DEVICE_OP(ptr, name)                                               \
2647 	do {                                                                   \
2648 		if (ops->name)                                                 \
2649 			if (!((ptr)->name))				       \
2650 				(ptr)->name = ops->name;                       \
2651 	} while (0)
2652 
2653 #define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)
2654 
2655 	if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
2656 		WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
2657 			dev_ops->driver_id != ops->driver_id);
2658 		dev_ops->driver_id = ops->driver_id;
2659 	}
2660 	if (ops->owner) {
2661 		WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
2662 		dev_ops->owner = ops->owner;
2663 	}
2664 	if (ops->uverbs_abi_ver)
2665 		dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;
2666 
2667 	dev_ops->uverbs_no_driver_id_binding |=
2668 		ops->uverbs_no_driver_id_binding;
2669 
2670 	SET_DEVICE_OP(dev_ops, add_gid);
2671 	SET_DEVICE_OP(dev_ops, add_sub_dev);
2672 	SET_DEVICE_OP(dev_ops, advise_mr);
2673 	SET_DEVICE_OP(dev_ops, alloc_dm);
2674 	SET_DEVICE_OP(dev_ops, alloc_hw_device_stats);
2675 	SET_DEVICE_OP(dev_ops, alloc_hw_port_stats);
2676 	SET_DEVICE_OP(dev_ops, alloc_mr);
2677 	SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
2678 	SET_DEVICE_OP(dev_ops, alloc_mw);
2679 	SET_DEVICE_OP(dev_ops, alloc_pd);
2680 	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
2681 	SET_DEVICE_OP(dev_ops, alloc_ucontext);
2682 	SET_DEVICE_OP(dev_ops, alloc_xrcd);
2683 	SET_DEVICE_OP(dev_ops, attach_mcast);
2684 	SET_DEVICE_OP(dev_ops, check_mr_status);
2685 	SET_DEVICE_OP(dev_ops, counter_alloc_stats);
2686 	SET_DEVICE_OP(dev_ops, counter_bind_qp);
2687 	SET_DEVICE_OP(dev_ops, counter_dealloc);
2688 	SET_DEVICE_OP(dev_ops, counter_init);
2689 	SET_DEVICE_OP(dev_ops, counter_unbind_qp);
2690 	SET_DEVICE_OP(dev_ops, counter_update_stats);
2691 	SET_DEVICE_OP(dev_ops, create_ah);
2692 	SET_DEVICE_OP(dev_ops, create_counters);
2693 	SET_DEVICE_OP(dev_ops, create_cq);
2694 	SET_DEVICE_OP(dev_ops, create_flow);
2695 	SET_DEVICE_OP(dev_ops, create_qp);
2696 	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
2697 	SET_DEVICE_OP(dev_ops, create_srq);
2698 	SET_DEVICE_OP(dev_ops, create_user_ah);
2699 	SET_DEVICE_OP(dev_ops, create_wq);
2700 	SET_DEVICE_OP(dev_ops, dealloc_dm);
2701 	SET_DEVICE_OP(dev_ops, dealloc_driver);
2702 	SET_DEVICE_OP(dev_ops, dealloc_mw);
2703 	SET_DEVICE_OP(dev_ops, dealloc_pd);
2704 	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
2705 	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
2706 	SET_DEVICE_OP(dev_ops, del_gid);
2707 	SET_DEVICE_OP(dev_ops, del_sub_dev);
2708 	SET_DEVICE_OP(dev_ops, dereg_mr);
2709 	SET_DEVICE_OP(dev_ops, destroy_ah);
2710 	SET_DEVICE_OP(dev_ops, destroy_counters);
2711 	SET_DEVICE_OP(dev_ops, destroy_cq);
2712 	SET_DEVICE_OP(dev_ops, destroy_flow);
2713 	SET_DEVICE_OP(dev_ops, destroy_flow_action);
2714 	SET_DEVICE_OP(dev_ops, destroy_qp);
2715 	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
2716 	SET_DEVICE_OP(dev_ops, destroy_srq);
2717 	SET_DEVICE_OP(dev_ops, destroy_wq);
2718 	SET_DEVICE_OP(dev_ops, device_group);
2719 	SET_DEVICE_OP(dev_ops, detach_mcast);
2720 	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
2721 	SET_DEVICE_OP(dev_ops, drain_rq);
2722 	SET_DEVICE_OP(dev_ops, drain_sq);
2723 	SET_DEVICE_OP(dev_ops, enable_driver);
2724 	SET_DEVICE_OP(dev_ops, fill_res_cm_id_entry);
2725 	SET_DEVICE_OP(dev_ops, fill_res_cq_entry);
2726 	SET_DEVICE_OP(dev_ops, fill_res_cq_entry_raw);
2727 	SET_DEVICE_OP(dev_ops, fill_res_mr_entry);
2728 	SET_DEVICE_OP(dev_ops, fill_res_mr_entry_raw);
2729 	SET_DEVICE_OP(dev_ops, fill_res_qp_entry);
2730 	SET_DEVICE_OP(dev_ops, fill_res_qp_entry_raw);
2731 	SET_DEVICE_OP(dev_ops, fill_res_srq_entry);
2732 	SET_DEVICE_OP(dev_ops, fill_res_srq_entry_raw);
2733 	SET_DEVICE_OP(dev_ops, fill_stat_mr_entry);
2734 	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
2735 	SET_DEVICE_OP(dev_ops, get_dma_mr);
2736 	SET_DEVICE_OP(dev_ops, get_hw_stats);
2737 	SET_DEVICE_OP(dev_ops, get_link_layer);
2738 	SET_DEVICE_OP(dev_ops, get_netdev);
2739 	SET_DEVICE_OP(dev_ops, get_numa_node);
2740 	SET_DEVICE_OP(dev_ops, get_port_immutable);
2741 	SET_DEVICE_OP(dev_ops, get_vector_affinity);
2742 	SET_DEVICE_OP(dev_ops, get_vf_config);
2743 	SET_DEVICE_OP(dev_ops, get_vf_guid);
2744 	SET_DEVICE_OP(dev_ops, get_vf_stats);
2745 	SET_DEVICE_OP(dev_ops, iw_accept);
2746 	SET_DEVICE_OP(dev_ops, iw_add_ref);
2747 	SET_DEVICE_OP(dev_ops, iw_connect);
2748 	SET_DEVICE_OP(dev_ops, iw_create_listen);
2749 	SET_DEVICE_OP(dev_ops, iw_destroy_listen);
2750 	SET_DEVICE_OP(dev_ops, iw_get_qp);
2751 	SET_DEVICE_OP(dev_ops, iw_reject);
2752 	SET_DEVICE_OP(dev_ops, iw_rem_ref);
2753 	SET_DEVICE_OP(dev_ops, map_mr_sg);
2754 	SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
2755 	SET_DEVICE_OP(dev_ops, mmap);
2756 	SET_DEVICE_OP(dev_ops, mmap_free);
2757 	SET_DEVICE_OP(dev_ops, modify_ah);
2758 	SET_DEVICE_OP(dev_ops, modify_cq);
2759 	SET_DEVICE_OP(dev_ops, modify_device);
2760 	SET_DEVICE_OP(dev_ops, modify_hw_stat);
2761 	SET_DEVICE_OP(dev_ops, modify_port);
2762 	SET_DEVICE_OP(dev_ops, modify_qp);
2763 	SET_DEVICE_OP(dev_ops, modify_srq);
2764 	SET_DEVICE_OP(dev_ops, modify_wq);
2765 	SET_DEVICE_OP(dev_ops, peek_cq);
2766 	SET_DEVICE_OP(dev_ops, poll_cq);
2767 	SET_DEVICE_OP(dev_ops, port_groups);
2768 	SET_DEVICE_OP(dev_ops, post_recv);
2769 	SET_DEVICE_OP(dev_ops, post_send);
2770 	SET_DEVICE_OP(dev_ops, post_srq_recv);
2771 	SET_DEVICE_OP(dev_ops, process_mad);
2772 	SET_DEVICE_OP(dev_ops, query_ah);
2773 	SET_DEVICE_OP(dev_ops, query_device);
2774 	SET_DEVICE_OP(dev_ops, query_gid);
2775 	SET_DEVICE_OP(dev_ops, query_pkey);
2776 	SET_DEVICE_OP(dev_ops, query_port);
2777 	SET_DEVICE_OP(dev_ops, query_qp);
2778 	SET_DEVICE_OP(dev_ops, query_srq);
2779 	SET_DEVICE_OP(dev_ops, query_ucontext);
2780 	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
2781 	SET_DEVICE_OP(dev_ops, read_counters);
2782 	SET_DEVICE_OP(dev_ops, reg_dm_mr);
2783 	SET_DEVICE_OP(dev_ops, reg_user_mr);
2784 	SET_DEVICE_OP(dev_ops, reg_user_mr_dmabuf);
2785 	SET_DEVICE_OP(dev_ops, req_notify_cq);
2786 	SET_DEVICE_OP(dev_ops, rereg_user_mr);
2787 	SET_DEVICE_OP(dev_ops, resize_cq);
2788 	SET_DEVICE_OP(dev_ops, set_vf_guid);
2789 	SET_DEVICE_OP(dev_ops, set_vf_link_state);
2790 	SET_DEVICE_OP(dev_ops, ufile_hw_cleanup);
2791 	SET_DEVICE_OP(dev_ops, report_port_event);
2792 
2793 	SET_OBJ_SIZE(dev_ops, ib_ah);
2794 	SET_OBJ_SIZE(dev_ops, ib_counters);
2795 	SET_OBJ_SIZE(dev_ops, ib_cq);
2796 	SET_OBJ_SIZE(dev_ops, ib_mw);
2797 	SET_OBJ_SIZE(dev_ops, ib_pd);
2798 	SET_OBJ_SIZE(dev_ops, ib_qp);
2799 	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
2800 	SET_OBJ_SIZE(dev_ops, ib_srq);
2801 	SET_OBJ_SIZE(dev_ops, ib_ucontext);
2802 	SET_OBJ_SIZE(dev_ops, ib_xrcd);
2803 	SET_OBJ_SIZE(dev_ops, rdma_counter);
2804 }
2805 EXPORT_SYMBOL(ib_set_device_ops);
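
/*
 * Example (not part of this file): a hedged sketch of how a provider driver
 * fills in its ops before ib_register_device(). The mydrv_* handlers and
 * struct mydrv_pd are hypothetical; only ops the driver implements need to
 * be listed, and ops that are already set are not overwritten.
 *
 *	static const struct ib_device_ops mydrv_dev_ops = {
 *		.owner = THIS_MODULE,
 *		.uverbs_abi_ver = 1,
 *
 *		.query_device = mydrv_query_device,
 *		.query_port = mydrv_query_port,
 *		.alloc_pd = mydrv_alloc_pd,
 *		.dealloc_pd = mydrv_dealloc_pd,
 *
 *		INIT_RDMA_OBJ_SIZE(ib_pd, mydrv_pd, ibpd),
 *	};
 *
 *	ib_set_device_ops(ibdev, &mydrv_dev_ops);
 */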
2806 
2807 int ib_add_sub_device(struct ib_device *parent,
2808 		      enum rdma_nl_dev_type type,
2809 		      const char *name)
2810 {
2811 	struct ib_device *sub;
2812 	int ret = 0;
2813 
2814 	if (!parent->ops.add_sub_dev || !parent->ops.del_sub_dev)
2815 		return -EOPNOTSUPP;
2816 
2817 	if (!ib_device_try_get(parent))
2818 		return -EINVAL;
2819 
2820 	sub = parent->ops.add_sub_dev(parent, type, name);
2821 	if (IS_ERR(sub)) {
2822 		ib_device_put(parent);
2823 		return PTR_ERR(sub);
2824 	}
2825 
2826 	sub->type = type;
2827 	sub->parent = parent;
2828 
2829 	mutex_lock(&parent->subdev_lock);
	list_add_tail(&sub->subdev_list, &parent->subdev_list_head);
2831 	mutex_unlock(&parent->subdev_lock);
2832 
2833 	return ret;
2834 }
2835 EXPORT_SYMBOL(ib_add_sub_device);
2836 
2837 int ib_del_sub_device_and_put(struct ib_device *sub)
2838 {
2839 	struct ib_device *parent = sub->parent;
2840 
2841 	if (!parent)
2842 		return -EOPNOTSUPP;
2843 
2844 	mutex_lock(&parent->subdev_lock);
2845 	list_del(&sub->subdev_list);
2846 	mutex_unlock(&parent->subdev_lock);
2847 
2848 	ib_device_put(sub);
2849 	parent->ops.del_sub_dev(sub);
2850 	ib_device_put(parent);
2851 
2852 	return 0;
2853 }
2854 EXPORT_SYMBOL(ib_del_sub_device_and_put);
2855 
2856 #ifdef CONFIG_INFINIBAND_VIRT_DMA
2857 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents)
2858 {
2859 	struct scatterlist *s;
2860 	int i;
2861 
2862 	for_each_sg(sg, s, nents, i) {
2863 		sg_dma_address(s) = (uintptr_t)sg_virt(s);
2864 		sg_dma_len(s) = s->length;
2865 	}
2866 	return nents;
2867 }
2868 EXPORT_SYMBOL(ib_dma_virt_map_sg);
2869 #endif /* CONFIG_INFINIBAND_VIRT_DMA */
2870 
2871 static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
2872 	[RDMA_NL_LS_OP_RESOLVE] = {
2873 		.doit = ib_nl_handle_resolve_resp,
2874 		.flags = RDMA_NL_ADMIN_PERM,
2875 	},
2876 	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
2877 		.doit = ib_nl_handle_set_timeout,
2878 		.flags = RDMA_NL_ADMIN_PERM,
2879 	},
2880 	[RDMA_NL_LS_OP_IP_RESOLVE] = {
2881 		.doit = ib_nl_handle_ip_res_resp,
2882 		.flags = RDMA_NL_ADMIN_PERM,
2883 	},
2884 };
2885 
2886 void ib_dispatch_port_state_event(struct ib_device *ibdev, struct net_device *ndev)
2887 {
2888 	enum ib_port_state curr_state;
2889 	struct ib_event ibevent = {};
2890 	u32 port;
2891 
2892 	if (ib_query_netdev_port(ibdev, ndev, &port))
2893 		return;
2894 
2895 	curr_state = ib_get_curr_port_state(ndev);
2896 
2897 	write_lock_irq(&ibdev->cache_lock);
2898 	if (ibdev->port_data[port].cache.last_port_state == curr_state) {
2899 		write_unlock_irq(&ibdev->cache_lock);
2900 		return;
2901 	}
2902 	ibdev->port_data[port].cache.last_port_state = curr_state;
2903 	write_unlock_irq(&ibdev->cache_lock);
2904 
2905 	ibevent.event = (curr_state == IB_PORT_DOWN) ?
2906 					IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
2907 	ibevent.device = ibdev;
2908 	ibevent.element.port_num = port;
2909 	ib_dispatch_event(&ibevent);
2910 }
2911 EXPORT_SYMBOL(ib_dispatch_port_state_event);
2912 
2913 static void handle_port_event(struct net_device *ndev, unsigned long event)
2914 {
2915 	struct ib_device *ibdev;
2916 
2917 	/* Currently, link events in bonding scenarios are still
2918 	 * reported by drivers that support bonding.
2919 	 */
2920 	if (netif_is_lag_master(ndev) || netif_is_lag_port(ndev))
2921 		return;
2922 
2923 	ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
2924 	if (!ibdev)
2925 		return;
2926 
2927 	if (ibdev->ops.report_port_event) {
2928 		ibdev->ops.report_port_event(ibdev, ndev, event);
2929 		goto put_ibdev;
2930 	}
2931 
2932 	ib_dispatch_port_state_event(ibdev, ndev);
2933 
2934 put_ibdev:
2935 	ib_device_put(ibdev);
2936 };
2937 
2938 static int ib_netdevice_event(struct notifier_block *this,
2939 			      unsigned long event, void *ptr)
2940 {
2941 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2942 	struct ib_device *ibdev;
2943 	u32 port;
2944 
2945 	switch (event) {
2946 	case NETDEV_CHANGENAME:
2947 		ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
2948 		if (!ibdev)
2949 			return NOTIFY_DONE;
2950 
2951 		if (ib_query_netdev_port(ibdev, ndev, &port)) {
2952 			ib_device_put(ibdev);
2953 			break;
2954 		}
2955 
2956 		rdma_nl_notify_event(ibdev, port, RDMA_NETDEV_RENAME_EVENT);
2957 		ib_device_put(ibdev);
2958 		break;
2959 
2960 	case NETDEV_UP:
2961 	case NETDEV_CHANGE:
2962 	case NETDEV_DOWN:
2963 		handle_port_event(ndev, event);
2964 		break;
2965 
2966 	default:
2967 		break;
2968 	}
2969 
2970 	return NOTIFY_DONE;
2971 }
2972 
2973 static struct notifier_block nb_netdevice = {
2974 	.notifier_call = ib_netdevice_event,
2975 };
2976 
2977 static int __init ib_core_init(void)
2978 {
2979 	int ret = -ENOMEM;
2980 
2981 	ib_wq = alloc_workqueue("infiniband", 0, 0);
2982 	if (!ib_wq)
2983 		return -ENOMEM;
2984 
2985 	ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
2986 				      WQ_UNBOUND_MAX_ACTIVE);
2987 	if (!ib_unreg_wq)
2988 		goto err;
2989 
2990 	ib_comp_wq = alloc_workqueue("ib-comp-wq",
2991 			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
2992 	if (!ib_comp_wq)
2993 		goto err_unbound;
2994 
2995 	ib_comp_unbound_wq =
2996 		alloc_workqueue("ib-comp-unb-wq",
2997 				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
2998 				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
2999 	if (!ib_comp_unbound_wq)
3000 		goto err_comp;
3001 
3002 	ret = class_register(&ib_class);
3003 	if (ret) {
3004 		pr_warn("Couldn't create InfiniBand device class\n");
3005 		goto err_comp_unbound;
3006 	}
3007 
3008 	rdma_nl_init();
3009 
3010 	ret = addr_init();
3011 	if (ret) {
3012 		pr_warn("Couldn't init IB address resolution\n");
3013 		goto err_ibnl;
3014 	}
3015 
3016 	ret = ib_mad_init();
3017 	if (ret) {
3018 		pr_warn("Couldn't init IB MAD\n");
3019 		goto err_addr;
3020 	}
3021 
3022 	ret = ib_sa_init();
3023 	if (ret) {
3024 		pr_warn("Couldn't init SA\n");
3025 		goto err_mad;
3026 	}
3027 
3028 	ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
3029 	if (ret) {
3030 		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
3031 		goto err_sa;
3032 	}
3033 
3034 	ret = register_pernet_device(&rdma_dev_net_ops);
3035 	if (ret) {
3036 		pr_warn("Couldn't init compat dev. ret %d\n", ret);
3037 		goto err_compat;
3038 	}
3039 
3040 	nldev_init();
3041 	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
3042 	ret = roce_gid_mgmt_init();
3043 	if (ret) {
3044 		pr_warn("Couldn't init RoCE GID management\n");
3045 		goto err_parent;
3046 	}
3047 
3048 	register_netdevice_notifier(&nb_netdevice);
3049 
3050 	return 0;
3051 
3052 err_parent:
3053 	rdma_nl_unregister(RDMA_NL_LS);
3054 	nldev_exit();
3055 	unregister_pernet_device(&rdma_dev_net_ops);
3056 err_compat:
3057 	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
3058 err_sa:
3059 	ib_sa_cleanup();
3060 err_mad:
3061 	ib_mad_cleanup();
3062 err_addr:
3063 	addr_cleanup();
3064 err_ibnl:
3065 	class_unregister(&ib_class);
3066 err_comp_unbound:
3067 	destroy_workqueue(ib_comp_unbound_wq);
3068 err_comp:
3069 	destroy_workqueue(ib_comp_wq);
3070 err_unbound:
3071 	destroy_workqueue(ib_unreg_wq);
3072 err:
3073 	destroy_workqueue(ib_wq);
3074 	return ret;
3075 }
3076 
3077 static void __exit ib_core_cleanup(void)
3078 {
3079 	unregister_netdevice_notifier(&nb_netdevice);
3080 	roce_gid_mgmt_cleanup();
3081 	rdma_nl_unregister(RDMA_NL_LS);
3082 	nldev_exit();
3083 	unregister_pernet_device(&rdma_dev_net_ops);
3084 	unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
3085 	ib_sa_cleanup();
3086 	ib_mad_cleanup();
3087 	addr_cleanup();
3088 	rdma_nl_exit();
3089 	class_unregister(&ib_class);
3090 	destroy_workqueue(ib_comp_unbound_wq);
3091 	destroy_workqueue(ib_comp_wq);
3092 	/* Make sure that any pending umem accounting work is done. */
3093 	destroy_workqueue(ib_wq);
3094 	destroy_workqueue(ib_unreg_wq);
3095 	WARN_ON(!xa_empty(&clients));
3096 	WARN_ON(!xa_empty(&devices));
3097 }
3098 
3099 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
3100 
/* The ib core relies on the netdev stack registering the
 * net_ns_type_operations ns kobject type before ib_core initialization.
3103  */
3104 fs_initcall(ib_core_init);
3105 module_exit(ib_core_cleanup);
3106