/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H

#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>

struct net;
struct netlink_ext_ack;
struct netdev_queue_config;
struct cpumask;
struct pp_memory_provider_params;

/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY	(1 << 7)	/* must be ^2 and !overflow buckets */
struct sd_flow_limit {
	struct rcu_head		rcu;
	unsigned int		count;
	u8			log_buckets;
	unsigned int		history_head;
	u16			history[FLOW_LIMIT_HISTORY];
	u8			buckets[];
};
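
/*
 * Illustrative sketch (not part of this header): FLOW_LIMIT_HISTORY must be
 * a power of two so the history ring can wrap with a mask instead of a
 * modulo, roughly as the flow limit code in net/core/dev.c does (bucket
 * selection elided, "new_flow" is the chosen bucket):
 *
 *	fl->history[fl->history_head] = new_flow;
 *	fl->history_head++;
 *	fl->history_head &= FLOW_LIMIT_HISTORY - 1;
 */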

extern int netdev_flow_limit_table_len;

struct napi_struct *
netdev_napi_by_id_lock(struct net *net, unsigned int napi_id);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);

struct net_device *netdev_put_lock(struct net_device *dev, struct net *net,
				   netdevice_tracker *tracker);

static inline struct net_device *
__netdev_put_lock(struct net_device *dev, struct net *net)
{
	return netdev_put_lock(dev, net, NULL);
}

struct net_device *
netdev_xa_find_lock(struct net *net, struct net_device *dev,
		    unsigned long *index);

DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T));

#define for_each_netdev_lock_scoped(net, var_name, ifindex)		\
	for (struct net_device *var_name __free(netdev_unlock) = NULL;	\
	     (var_name = netdev_xa_find_lock(net, var_name, &ifindex));	\
	     ifindex++)
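
/*
 * Usage sketch (illustrative): walk every netdev in @net with the instance
 * lock held for each iteration. The __free(netdev_unlock) cleanup drops the
 * lock when the loop variable is advanced or goes out of scope, so no
 * explicit unlock is needed, including on early exit:
 *
 *	unsigned long ifindex = 0;
 *
 *	for_each_netdev_lock_scoped(net, dev, ifindex) {
 *		if (some_condition(dev))	// hypothetical predicate
 *			return do_thing(dev);	// lock dropped on return
 *	}
 */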

struct net_device *
netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex);
struct net_device *
netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev,
			       unsigned long *index);

DEFINE_FREE(netdev_unlock_ops_compat, struct net_device *,
	    if (_T) netdev_unlock_ops_compat(_T));

#define for_each_netdev_lock_ops_compat_scoped(net, var_name, ifindex)	\
	for (struct net_device *var_name __free(netdev_unlock_ops_compat) = NULL; \
	     (var_name = netdev_xa_find_lock_ops_compat(net, var_name,	\
							&ifindex));	\
	     ifindex++)
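
/*
 * The _ops_compat variant is used the same way; the difference (an
 * assumption based on the netdev_lock_ops_compat() helpers) is that it
 * holds the instance lock only for ops-locked devices and falls back to
 * rtnl_lock for legacy drivers:
 *
 *	unsigned long ifindex = 0;
 *
 *	for_each_netdev_lock_ops_compat_scoped(net, dev, ifindex)
 *		do_thing(dev);	// hypothetical per-device operation
 */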

#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif

void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);

void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);

#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
				       unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
						     unsigned int txq) {}
#endif

/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;

extern struct rw_semaphore dev_addr_sem;

/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);

int netdev_queue_config_validate(struct net_device *dev, int rxq_idx,
				 struct netdev_queue_config *qcfg,
				 struct netlink_ext_ack *extack);

bool netif_rxq_has_mp(struct net_device *dev, unsigned int rxq_idx);
bool netif_rxq_is_leased(struct net_device *dev, unsigned int rxq_idx);
bool netif_is_queue_leasee(const struct net_device *dev);

void __netif_mp_uninstall_rxq(struct netdev_rx_queue *rxq,
			      const struct pp_memory_provider_params *p);

void netif_rxq_cleanup_unlease(struct netdev_rx_queue *phys_rxq,
			       struct netdev_rx_queue *virt_rxq);

/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
	struct rcu_head rcu;
};

int netdev_get_name(struct net *net, char *name, int ifindex);
int netif_change_name(struct net_device *dev, const char *newname);
int dev_change_name(struct net_device *dev, const char *newname);

#define netdev_for_each_altname(dev, namenode)				\
	list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next)		\
	list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
				 list)
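
/*
 * Illustrative use of the altname iterators: reject a duplicate altname
 * before creating a new one ("name" is a hypothetical candidate string;
 * the name_node list is normally manipulated under RTNL):
 *
 *	struct netdev_name_node *name_node;
 *
 *	netdev_for_each_altname(dev, name_node)
 *		if (!strcmp(name_node->name, name))
 *			return -EEXIST;
 */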

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

int dev_validate_mtu(struct net_device *dev, int mtu,
		     struct netlink_ext_ack *extack);
int netif_set_mtu_ext(struct net_device *dev, int new_mtu,
		      struct netlink_ext_ack *extack);

int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len);

int netif_change_proto_down(struct net_device *dev, bool proto_down);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void netdev_change_proto_down_reason_locked(struct net_device *dev,
					    unsigned long mask, u32 value);

typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags);

int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void netif_set_group(struct net_device *dev, int new_group);
void dev_set_group(struct net_device *dev, int new_group);
int netif_change_carrier(struct net_device *dev, bool new_carrier);
int dev_change_carrier(struct net_device *dev, bool new_carrier);

void __dev_set_rx_mode(struct net_device *dev);

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges, u32 portid,
			const struct nlmsghdr *nlh);

void unregister_netdevice_many_notify(struct list_head *head,
				      u32 portid, const struct nlmsghdr *nlh);

static inline void netif_set_up(struct net_device *dev, bool value)
{
	if (value)
		dev->flags |= IFF_UP;
	else
		dev->flags &= ~IFF_UP;

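	/*
	 * dev->up is written under the instance lock. Ops-locked devices
	 * are assumed to reach here with that lock already held by the
	 * caller, so it is only taken for the legacy case below.
	 */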
	if (!netdev_need_ops_lock(dev))
		netdev_lock(dev);
	dev->up = value;
	if (!netdev_need_ops_lock(dev))
		netdev_unlock(dev);
}

static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* dev->gso_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_size, size);
	if (size <= GSO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gso_max_segs(struct net_device *dev,
					  unsigned int segs)
{
	/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_max_segs, segs);
}

static inline void netif_set_gro_max_size(struct net_device *dev,
					  unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_max_size, size);
	if (size <= GRO_LEGACY_MAX_SIZE)
		WRITE_ONCE(dev->gro_ipv4_max_size, size);
}

static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
	WRITE_ONCE(dev->gso_ipv4_max_size, size);
}

static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
					       unsigned int size)
{
	/* This pairs with the READ_ONCE() in skb_gro_receive() */
	WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
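
/*
 * Sketch of how a driver might cap aggregate sizes at probe time
 * (illustrative; MY_DRV_MAX_AGG is a made-up driver constant):
 *
 *	netif_set_gso_max_size(dev, MY_DRV_MAX_AGG);
 *	netif_set_gro_max_size(dev, MY_DRV_MAX_AGG);
 *
 * The helpers use WRITE_ONCE() because the fields are read locklessly,
 * e.g. from sk_setup_caps() and skb_gro_receive(), as noted above.
 */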

/**
 * napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
 * @n: napi struct to get the defer_hard_irqs field from
 *
 * Return: the per-NAPI value of the defer_hard_irqs field.
 */
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
	return READ_ONCE(n->defer_hard_irqs);
}

/**
 * napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
 * @n: napi_struct to set the defer_hard_irqs field
 * @defer: the value the field should be set to
 */
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
	WRITE_ONCE(n->defer_hard_irqs, defer);
}

/**
 * netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
 * @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
 * @defer: the defer_hard_irqs value to set
 */
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
					      u32 defer)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_defer_hard_irqs(napi, defer);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].defer_hard_irqs = defer;
}

/**
 * napi_get_gro_flush_timeout - get the gro_flush_timeout
 * @n: napi struct to get the gro_flush_timeout from
 *
 * Return: the per-NAPI value of the gro_flush_timeout field.
 */
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->gro_flush_timeout);
}

/**
 * napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
 * @n: napi struct to set the gro_flush_timeout
 * @timeout: timeout value to set
 *
 * napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
 */
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
					      unsigned long timeout)
{
	WRITE_ONCE(n->gro_flush_timeout, timeout);
}

/**
 * netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
 * @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
 * @timeout: the timeout value to set
 */
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
						unsigned long timeout)
{
	unsigned int count = max(netdev->num_rx_queues,
				 netdev->num_tx_queues);
	struct napi_struct *napi;
	int i;

	WRITE_ONCE(netdev->gro_flush_timeout, timeout);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		napi_set_gro_flush_timeout(napi, timeout);

	for (i = 0; i < count; i++)
		netdev->napi_config[i].gro_flush_timeout = timeout;
}

/**
 * napi_get_irq_suspend_timeout - get the irq_suspend_timeout
 * @n: napi struct to get the irq_suspend_timeout from
 *
 * Return: the per-NAPI value of the irq_suspend_timeout field.
 */
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
	return READ_ONCE(n->irq_suspend_timeout);
}

/**
 * napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
 * @n: napi struct to set the irq_suspend_timeout
 * @timeout: timeout value to set
 *
 * napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
 */
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
						unsigned long timeout)
{
	WRITE_ONCE(n->irq_suspend_timeout, timeout);
}
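
/*
 * Sketch of a per-NAPI tuning path, e.g. from a netlink handler
 * (illustrative; the values are made up):
 *
 *	napi_set_defer_hard_irqs(napi, 2);
 *	napi_set_gro_flush_timeout(napi, 20000);	// nanoseconds
 *	napi_set_irq_suspend_timeout(napi, 500000);	// nanoseconds
 *
 * The netdev-wide netdev_set_*() variants above additionally update
 * netdev->napi_config[] so NAPIs attached to a config slot later pick up
 * the same values.
 */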

static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
{
	if (test_bit(NAPI_STATE_THREADED_BUSY_POLL, &n->state))
		return NETDEV_NAPI_THREADED_BUSY_POLL;

	if (test_bit(NAPI_STATE_THREADED, &n->state))
		return NETDEV_NAPI_THREADED_ENABLED;

	return NETDEV_NAPI_THREADED_DISABLED;
}

static inline enum netdev_napi_threaded
napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
{
	if (n->config)
		return n->config->threaded;
	return dev->threaded;
}

int napi_set_threaded(struct napi_struct *n,
		      enum netdev_napi_threaded threaded);

int netif_set_threaded(struct net_device *dev,
		       enum netdev_napi_threaded threaded);

int rps_cpumask_housekeeping(struct cpumask *mask);

#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif

/* Best effort check that NAPI is not idle (can't be scheduled to run) */
static inline void napi_assert_will_not_race(const struct napi_struct *napi)
{
	/* uninitialized instance, can't race */
	if (!napi->poll_list.next)
		return;

	/* SCHED bit is set on disabled instances */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
	WARN_ON(READ_ONCE(napi->list_owner) != -1);
}
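
/*
 * Typical use (illustrative): assert quiescence before freeing resources
 * the poll loop could still touch. A disabled NAPI keeps NAPI_STATE_SCHED
 * set, which is what the first WARN_ON() relies on:
 *
 *	napi_disable(&priv->napi);
 *	napi_assert_will_not_race(&priv->napi);
 *	my_drv_free_rx_ring(priv);	// hypothetical driver helper
 */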

void kick_defer_list_purge(unsigned int cpu);

int dev_set_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack);
int dev_get_hwtstamp_phylib(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg);
int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg);

#endif