/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

static inline void netdev_assert_locked(const struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

/* The instance lock is only required while the device is visible to other
 * threads, i.e. while it is registered or in the process of unregistering.
 */
static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}

/* True if ops callbacks for this device run under the per-device instance
 * lock: the driver requested it explicitly, or it implements queue
 * management (or, with CONFIG_NET_SHAPER, shaper) ops.
 */
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

/* Take (and drop) dev->lock around ops callbacks only for devices that
 * opted into instance locking; everything else stays protected by RTNL.
 */
static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}
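
/* Illustrative sketch (not part of this header): the typical pairing of
 * netdev_lock_ops()/netdev_unlock_ops() around an ndo callback in a core
 * caller. The function example_change_mtu() is hypothetical, and the NULL
 * check and error handling of a real caller are omitted; only the
 * conditional lock/unlock pairing matters here.
 *
 *	static int example_change_mtu(struct net_device *dev, int new_mtu)
 *	{
 *		int err;
 *
 *		netdev_lock_ops(dev);
 *		err = dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
 *		netdev_unlock_ops(dev);
 *		return err;
 *	}
 */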

/* Convert between the "ops locked" and "fully locked" states: devices that
 * use the ops lock already hold dev->lock at this point (only assert it),
 * all other devices take or release dev->lock here.
 */
static inline void netdev_lock_ops_to_full(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_lock(dev);
}

static inline void netdev_unlock_full_to_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_assert_locked(dev);
	else
		netdev_unlock(dev);
}
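
/* Illustrative sketch: temporarily upgrading a section that runs at the
 * "ops" locking level to the full instance lock. The surrounding context is
 * hypothetical; it assumes the caller already did netdev_lock_ops(dev).
 * For an ops-locked device the upgrade is a no-op, otherwise dev->lock is
 * held for the inner section only.
 *
 *	netdev_lock_ops(dev);
 *	...
 *	netdev_lock_ops_to_full(dev);
 *	... (state that needs dev->lock unconditionally) ...
 *	netdev_unlock_full_to_ops(dev);
 *	...
 *	netdev_unlock_ops(dev);
 */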

/* For ops-locked devices the caller must hold dev->lock; for everything
 * else the callbacks are still protected by RTNL.
 */
static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
	else
		ASSERT_RTNL();
}

static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_ops_assert_locked(dev);
}

/* Compatibility helpers for paths that historically ran under RTNL only:
 * take dev->lock for ops-locked devices and fall back to RTNL otherwise,
 * so netdev_ops_assert_locked() is satisfied either way.
 */
static inline void netdev_lock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
	else
		rtnl_lock();
}

static inline void netdev_unlock_ops_compat(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
	else
		rtnl_unlock();
}
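
/* Illustrative sketch: using the _compat helpers from a path that does not
 * otherwise hold RTNL. The function example_sync_features() is hypothetical;
 * the point is that either dev->lock or RTNL ends up held, so the ops
 * assertion passes for both kinds of devices.
 *
 *	static void example_sync_features(struct net_device *dev)
 *	{
 *		netdev_lock_ops_compat(dev);
 *		netdev_ops_assert_locked(dev);
 *		...
 *		netdev_unlock_ops_compat(dev);
 *	}
 */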

static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	/* Only lower devices currently grab the instance lock, so no real
	 * ordering issues can occur. In the near future, only hardware
	 * devices will grab the instance lock, which also does not involve
	 * any ordering. Suppress lockdep ordering warnings until (and
	 * unless) we start grabbing the instance lock on pure SW devices
	 * (bond/team/veth/etc).
	 */
	if (a == b)
		return 0;
	return -1;
}

/* Give the per-netdev locks their own static lockdep classes at the call
 * site, so stacked device types do not share (and falsely conflate) lock
 * classes.
 */
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL);	\
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}
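
/* Illustrative sketch: a software device assigning its lockdep classes from
 * its setup/init path, the way in-tree stacked devices (e.g. bonding, team)
 * do. The driver and ops structure names below are hypothetical.
 *
 *	static void example_dev_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *		dev->netdev_ops = &example_netdev_ops;
 *		netdev_lockdep_set_classes(dev);
 *	}
 */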

int netdev_debug_event(struct notifier_block *nb, unsigned long event,
		       void *ptr);

#endif /* _NET_NETDEV_LOCK_H */