/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_NETDEV_LOCK_H
#define _NET_NETDEV_LOCK_H

#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

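/* Nonblocking attempt to take the per-device instance lock. */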
static inline bool netdev_trylock(struct net_device *dev)
{
	return mutex_trylock(&dev->lock);
}

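/* Assert via lockdep that the instance lock is held. */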
static inline void netdev_assert_locked(const struct net_device *dev)
{
	lockdep_assert_held(&dev->lock);
}

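/* The instance lock only matters while the device is visible to the
 * rest of the system: before registration and after unregistration
 * there can be no concurrent users, so the assertion is skipped.
 */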
static inline void
netdev_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_assert_locked(dev);
}

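/* True when this device's ops must run under the instance lock rather
 * than under RTNL alone: the driver asked for it (request_ops_lock),
 * implements queue management ops, or (when CONFIG_NET_SHAPER is
 * enabled) provides net_shaper ops.
 */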
static inline bool netdev_need_ops_lock(const struct net_device *dev)
{
	bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops;

#if IS_ENABLED(CONFIG_NET_SHAPER)
	ret |= !!dev->netdev_ops->net_shaper_ops;
#endif

	return ret;
}

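/* Take the instance lock only for devices that need it for their ops;
 * all other devices keep relying on RTNL. A typical caller brackets an
 * ndo invocation like so (a sketch; ndo_foo() is a hypothetical op):
 *
 *	netdev_lock_ops(dev);
 *	err = dev->netdev_ops->ndo_foo(dev);
 *	netdev_unlock_ops(dev);
 */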
static inline void netdev_lock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_lock(dev);
}

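/* Drop the instance lock taken by netdev_lock_ops(). */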
static inline void netdev_unlock_ops(struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

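/* Assert that whichever lock protects this device's ops is held:
 * the instance lock when the device uses it, RTNL otherwise.
 */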
static inline void netdev_ops_assert_locked(const struct net_device *dev)
{
	if (netdev_need_ops_lock(dev))
		lockdep_assert_held(&dev->lock);
	else
		ASSERT_RTNL();
}

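/* As above, but skipped while the device is invisible to the rest of
 * the system (not yet registered, or already unregistered).
 */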
static inline void
netdev_ops_assert_locked_or_invisible(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING)
		netdev_ops_assert_locked(dev);
}

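/* Lock-ordering comparator for instance locks, registered below via
 * lock_set_cmp_fn(); lockdep calls it when two locks of this class
 * are held at the same time.
 */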
static inline int netdev_lock_cmp_fn(const struct lockdep_map *a,
				     const struct lockdep_map *b)
{
	/* Only lower devices currently grab the instance lock, so no
	 * real ordering issues can occur. In the near future, only
	 * hardware devices will grab the instance lock, which also does
	 * not involve any ordering. Suppress lockdep ordering warnings
	 * until (if) we start grabbing the instance lock on pure SW
	 * devices (bond/team/veth/etc).
	 */
	if (a == b)
		return 0;
	return -1;
}

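/* Give the qdisc busylock, the per-queue xmit locks, the address-list
 * lock and the instance lock a lock class unique to this call site
 * (i.e. per driver), so that stacking devices on top of each other
 * does not trigger false-positive lockdep reports.
 */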
#define netdev_lockdep_set_classes(dev)				\
{								\
	static struct lock_class_key qdisc_tx_busylock_key;	\
	static struct lock_class_key qdisc_xmit_lock_key;	\
	static struct lock_class_key dev_addr_list_lock_key;	\
	static struct lock_class_key dev_instance_lock_key;	\
	unsigned int i;						\
								\
	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
	lockdep_set_class(&(dev)->addr_list_lock,		\
			  &dev_addr_list_lock_key);		\
	lockdep_set_class(&(dev)->lock,				\
			  &dev_instance_lock_key);		\
	lock_set_cmp_fn(&(dev)->lock, netdev_lock_cmp_fn, NULL); \
	for (i = 0; i < (dev)->num_tx_queues; i++)		\
		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
				  &qdisc_xmit_lock_key);	\
}

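/* rcu_dereference_protected() for RCU pointers whose updates are
 * serialized by the instance lock.
 */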
#define netdev_lock_dereference(p, dev)				\
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->lock))

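/* Notifier callback used by the lock debugging code to check the
 * expected instance-lock state on netdev notifier events.
 */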
int netdev_debug_event(struct notifier_block *nb, unsigned long event,
		       void *ptr);

#endif /* _NET_NETDEV_LOCK_H */