/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/local.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
#include <linux/rbtree.h>
#include <net/net_trackers.h>
#include <net/net_debug.h>
#include <net/dropreason-core.h>
#include <net/neighbour_tables.h>

struct netpoll_info;
struct device;
struct ethtool_ops;
struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
struct netdev_config;
struct netdev_name_node;
struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
struct ethtool_netdev_state;
struct phy_link_topology;
struct hwtstamp_provider;

typedef u32 xdp_features_t;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);
void netdev_sw_irq_coalesce_default_on(struct net_device *dev);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
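
/*
 * Illustrative sketch (not part of this header): callers commonly treat
 * NET_XMIT_CN as success when accounting, and use net_xmit_errno() when a
 * real errno must be reported upwards:
 *
 *	int rc = dev_queue_xmit(skb);
 *
 *	if (net_xmit_eval(rc) == 0)
 *		stats->tx_packets++;		(sent, or congestion-notified)
 *	else
 *		err = net_xmit_errno(rc);	(-ENOBUFS for a real drop)
 */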

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
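
/*
 * Illustrative sketch (assumed caller, not defined here): the core transmit
 * path uses dev_xmit_complete() to decide whether the skb still belongs to
 * the caller after invoking the driver:
 *
 *	rc = netdev_start_xmit(skb, dev, txq, more);
 *	if (dev_xmit_complete(rc))
 *		return rc;	(skb consumed - success or error)
 *	(NETDEV_TX_BUSY: the skb is still valid and may be requeued later)
 */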

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

#define NET_DEV_STAT(FIELD)			\
	union {					\
		unsigned long FIELD;		\
		atomic_long_t __##FIELD;	\
	}

struct net_device_stats {
	NET_DEV_STAT(rx_packets);
	NET_DEV_STAT(tx_packets);
	NET_DEV_STAT(rx_bytes);
	NET_DEV_STAT(tx_bytes);
	NET_DEV_STAT(rx_errors);
	NET_DEV_STAT(tx_errors);
	NET_DEV_STAT(rx_dropped);
	NET_DEV_STAT(tx_dropped);
	NET_DEV_STAT(multicast);
	NET_DEV_STAT(collisions);
	NET_DEV_STAT(rx_length_errors);
	NET_DEV_STAT(rx_over_errors);
	NET_DEV_STAT(rx_crc_errors);
	NET_DEV_STAT(rx_frame_errors);
	NET_DEV_STAT(rx_fifo_errors);
	NET_DEV_STAT(rx_missed_errors);
	NET_DEV_STAT(tx_aborted_errors);
	NET_DEV_STAT(tx_carrier_errors);
	NET_DEV_STAT(tx_fifo_errors);
	NET_DEV_STAT(tx_heartbeat_errors);
	NET_DEV_STAT(tx_window_errors);
	NET_DEV_STAT(rx_compressed);
	NET_DEV_STAT(tx_compressed);
};
#undef NET_DEV_STAT
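
/*
 * Illustrative sketch: since each field is a native word, a driver can bump
 * dev->stats directly from its datapath without extra locking, e.g. in a
 * TX completion handler (context hypothetical):
 *
 *	dev->stats.tx_packets++;
 *	dev->stats.tx_bytes += skb->len;
 *	if (unlikely(hw_reported_error))
 *		dev->stats.tx_errors++;
 */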

/* per-cpu stats, allocated on demand.
 * Try to fit them in a single cache line, for dev_get_stats() sake.
 */
struct net_device_core_stats {
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	rx_nohandler;
	unsigned long	rx_otherhost_dropped;
} __aligned(4 * sizeof(unsigned long));

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	struct rb_node		node;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;

	/* Auxiliary tree for faster lookup on addition and deletion */
	struct rb_root		tree;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_for_each_synced_uc_addr(_ha, _dev) \
	netdev_for_each_uc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)
#define netdev_for_each_synced_mc_addr(_ha, _dev) \
	netdev_for_each_mc_addr((_ha), (_dev)) \
		if ((_ha)->sync_cnt)
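
/*
 * Illustrative sketch (mydrv_* names hypothetical): a driver's
 * ndo_set_rx_mode() implementation typically walks these lists to program
 * its hardware filters:
 *
 *	struct netdev_hw_addr *ha;
 *
 *	netdev_for_each_mc_addr(ha, dev)
 *		mydrv_hw_add_mc_filter(priv, ha->addr);
 *	if (!netdev_uc_empty(dev))
 *		netdev_for_each_uc_addr(ha, dev)
 *			mydrv_hw_add_uc_filter(priv, ha->addr);
 */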

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
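
/*
 * Illustrative sketch: code building an output skb reserves this much
 * headroom up front so the link-layer header can be pushed later without
 * a reallocation:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */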

struct header_ops {
	int	(*create)(struct sk_buff *skb, struct net_device *dev,
			  unsigned short type, const void *daddr,
			  const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb,
			 const struct net_device *dev,
			 unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
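
/*
 * Illustrative sketch: Ethernet devices get a stock implementation of these
 * hooks (eth_header_ops in net/ethernet/eth.c); a device with custom framing
 * would supply its own table (mydrv_* hypothetical):
 *
 *	static const struct header_ops mydrv_header_ops = {
 *		.create	= mydrv_header_create,
 *		.parse	= mydrv_header_parse,
 *	};
 *
 *	dev->header_ops = &mydrv_header_ops;
 */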

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be <= the number of bits in
 * gro_node::bitmask
 */
#define GRO_HASH_BUCKETS	8

/**
 * struct gro_node - structure to support Generic Receive Offload
 * @bitmask: bitmask to indicate used buckets in @hash
 * @hash: hashtable of pending aggregated skbs, separated by flows
 * @rx_list: list of pending ``GRO_NORMAL`` skbs
 * @rx_count: cached current length of @rx_list
 * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
 */
struct gro_node {
	unsigned long		bitmask;
	struct gro_list		hash[GRO_HASH_BUCKETS];
	struct list_head	rx_list;
	u32			rx_count;
	u32			cached_napi_id;
};

/*
 * Structure for per-NAPI config
 */
struct napi_config {
	u64		gro_flush_timeout;
	u64		irq_suspend_timeout;
	u32		defer_hard_irqs;
	cpumask_t	affinity_mask;
	u8		threaded;
	unsigned int	napi_id;
};

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* This field should be first or softnet_data.backlog needs tweaks. */
	unsigned long		state;
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	int			weight;
	u32			defer_hard_irqs_count;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	/* CPU actively polling if netpoll is configured */
	int			poll_owner;
#endif
	/* CPU on which NAPI has been scheduled for processing */
	int			list_owner;
	struct net_device	*dev;
	struct sk_buff		*skb;
	struct gro_node		gro;
	struct hrtimer		timer;
	/* all fields past this point are write-protected by netdev_lock */
	struct task_struct	*thread;
	unsigned long		gro_flush_timeout;
	unsigned long		irq_suspend_timeout;
	u32			defer_hard_irqs;
	/* control-path-only fields follow */
	u32			napi_id;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	int			irq;
	struct irq_affinity_notify notify;
	int			napi_rmap_idx;
	int			index;
	struct napi_config	*config;
};

enum {
	NAPI_STATE_SCHED,		/* Poll is scheduled */
	NAPI_STATE_MISSED,		/* reschedule a napi */
	NAPI_STATE_DISABLE,		/* Disable pending */
	NAPI_STATE_NPSVC,		/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,		/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,	/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,	/* Do not rearm NAPI interrupt */
	NAPI_STATE_PREFER_BUSY_POLL,	/* prefer busy-polling over softirq processing */
	NAPI_STATE_THREADED,		/* The poll is performed inside its own thread */
	NAPI_STATE_SCHED_THREADED,	/* Napi is currently scheduled in threaded mode */
	NAPI_STATE_HAS_NOTIFIER,	/* Napi has an IRQ notifier */
	NAPI_STATE_THREADED_BUSY_POLL,	/* The threaded NAPI poller will busy poll */
};

enum {
	NAPIF_STATE_SCHED		= BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED		= BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE		= BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC		= BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED		= BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL	= BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL	= BIT(NAPI_STATE_IN_BUSY_POLL),
	NAPIF_STATE_PREFER_BUSY_POLL	= BIT(NAPI_STATE_PREFER_BUSY_POLL),
	NAPIF_STATE_THREADED		= BIT(NAPI_STATE_THREADED),
	NAPIF_STATE_SCHED_THREADED	= BIT(NAPI_STATE_SCHED_THREADED),
	NAPIF_STATE_HAS_NOTIFIER	= BIT(NAPI_STATE_HAS_NOTIFIER),
	NAPIF_STATE_THREADED_BUSY_POLL	= BIT(NAPI_STATE_THREADED_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
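
/*
 * Illustrative sketch (hypothetical handler): an rx_handler that redirects
 * frames to an upper device returns RX_HANDLER_ANOTHER, while one that has
 * taken over delivery itself returns RX_HANDLER_CONSUMED:
 *
 *	static rx_handler_result_t mydrv_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *
 *		if (!mydrv_should_intercept(skb))
 *			return RX_HANDLER_PASS;
 *
 *		skb->dev = mydrv_upper_dev(skb->dev);
 *		*pskb = skb;
 *		return RX_HANDLER_ANOTHER;
 *	}
 */
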
509
510 void __napi_schedule(struct napi_struct *n);
511 void __napi_schedule_irqoff(struct napi_struct *n);
512
napi_disable_pending(struct napi_struct * n)513 static inline bool napi_disable_pending(struct napi_struct *n)
514 {
515 return test_bit(NAPI_STATE_DISABLE, &n->state);
516 }
517
napi_prefer_busy_poll(struct napi_struct * n)518 static inline bool napi_prefer_busy_poll(struct napi_struct *n)
519 {
520 return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
521 }
522
523 /**
524 * napi_is_scheduled - test if NAPI is scheduled
525 * @n: NAPI context
526 *
527 * This check is "best-effort". With no locking implemented,
528 * a NAPI can be scheduled or terminate right after this check
529 * and produce not precise results.
530 *
531 * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
532 * should not be used normally and napi_schedule should be
533 * used instead.
534 *
535 * Use only if the driver really needs to check if a NAPI
536 * is scheduled for example in the context of delayed timer
537 * that can be skipped if a NAPI is already scheduled.
538 *
539 * Return: True if NAPI is scheduled, False otherwise.
540 */
napi_is_scheduled(struct napi_struct * n)541 static inline bool napi_is_scheduled(struct napi_struct *n)
542 {
543 return test_bit(NAPI_STATE_SCHED, &n->state);
544 }
545
546 bool napi_schedule_prep(struct napi_struct *n);
547
548 /**
549 * napi_schedule - schedule NAPI poll
550 * @n: NAPI context
551 *
552 * Schedule NAPI poll routine to be called if it is not already
553 * running.
554 * Return: true if we schedule a NAPI or false if not.
555 * Refer to napi_schedule_prep() for additional reason on why
556 * a NAPI might not be scheduled.
557 */
napi_schedule(struct napi_struct * n)558 static inline bool napi_schedule(struct napi_struct *n)
559 {
560 if (napi_schedule_prep(n)) {
561 __napi_schedule(n);
562 return true;
563 }
564
565 return false;
566 }
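
/*
 * Illustrative sketch (mydrv_* hypothetical): the canonical caller is the
 * device's hard interrupt handler - mask device interrupts, then let the
 * NAPI poll loop take over:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *
 *		mydrv_mask_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */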

/**
 * napi_schedule_irqoff - schedule NAPI poll
 * @n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/**
 * napi_complete_done - NAPI processing complete
 * @n: NAPI context
 * @work_done: number of packets processed
 *
 * Mark NAPI processing as complete. Should only be called if poll budget
 * has not been completely consumed.
 * Prefer over napi_complete().
 * Return: false if device should avoid rearming interrupts.
 */
bool napi_complete_done(struct napi_struct *n, int work_done);

static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
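
/*
 * Illustrative sketch of a poll routine (hypothetical driver): process at
 * most @budget packets; only when less than the budget was consumed may the
 * driver complete NAPI, and only if napi_complete_done() returns true may
 * it re-arm its interrupt:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv =
 *			container_of(napi, struct mydrv_priv, napi);
 *		int work = mydrv_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			mydrv_unmask_irqs(priv);
 *		return work;
 *	}
 */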

void netif_threaded_enable(struct net_device *dev);
int dev_set_threaded(struct net_device *dev,
		     enum netdev_napi_threaded threaded);

void napi_disable(struct napi_struct *n);
void napi_disable_locked(struct napi_struct *n);

void napi_enable(struct napi_struct *n);
void napi_enable_locked(struct napi_struct *n);

/**
 * napi_synchronize - wait until NAPI is not running
 * @n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}
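
/*
 * Illustrative sketch (hypothetical teardown path): with device interrupts
 * already masked so no new scheduling can occur, napi_synchronize() lets a
 * driver wait out an in-flight poll before touching shared ring state:
 *
 *	mydrv_mask_irqs(priv);
 *	napi_synchronize(&priv->napi);
 *	mydrv_reset_rx_ring(priv);
 */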

/**
 * napi_if_scheduled_mark_missed - if napi is running, set the
 * NAPIF_STATE_MISSED
 * @n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	val = READ_ONCE(n->state);
	do {
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (!try_cmpxchg(&n->state, &val, new));

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
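
/*
 * Illustrative sketch (hypothetical driver): TX flow control uses only the
 * netif_tx_* helpers, as noted above - stop the queue when the ring is
 * nearly full, wake it from the completion path:
 *
 *	struct netdev_queue *txq = netdev_get_tx_queue(dev, ring->index);
 *
 *	if (mydrv_ring_free(ring) < MAX_SKB_FRAGS + 1)
 *		netif_tx_stop_queue(txq);
 *	...
 *	if (netif_tx_queue_stopped(txq) && mydrv_ring_free(ring) > wake_thresh)
 *		netif_tx_wake_queue(txq);
 */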

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;

	struct Qdisc __rcu	*qdisc;
	struct Qdisc __rcu	*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
	const struct attribute_group	**groups;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	atomic_long_t		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	/* "ops protected", see comment about net_device::lock */
	struct xsk_buff_pool	*pool;
#endif

/*
 * write-mostly part
 */
#ifdef CONFIG_BQL
	struct dql		dql;
#endif
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

/*
 * slow- / control-path part
 */
	/* NAPI instance for the queue
	 * "ops protected", see comment about net_device::lock
	 */
	struct napi_struct	*napi;

#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
	       (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif

/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
	XPS_CPUS = 0,
	XPS_RXQS,
	XPS_MAPS_MAX,
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
			    - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 *
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This helps avoid out-of-bounds memory accesses.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This is used when navigating the maps, to ensure we
 * do not cross their upper bound, as the original dev->num_tc can be updated
 * in the meantime.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	unsigned int	nr_ids;
	s16		num_tc;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum net_device_path_type {
	DEV_PATH_ETHERNET = 0,
	DEV_PATH_VLAN,
	DEV_PATH_BRIDGE,
	DEV_PATH_PPPOE,
	DEV_PATH_DSA,
	DEV_PATH_MTK_WDMA,
	DEV_PATH_TUN,
};

struct net_device_path {
	enum net_device_path_type	type;
	const struct net_device		*dev;
	union {
		struct {
			u16		id;
			__be16		proto;
			u8		h_dest[ETH_ALEN];
		} encap;
		struct {
			union {
				struct in_addr	src_v4;
				struct in6_addr	src_v6;
			};
			union {
				struct in_addr	dst_v4;
				struct in6_addr	dst_v6;
			};

			u8		l3_proto;
		} tun;
		struct {
			enum {
				DEV_PATH_BR_VLAN_KEEP,
				DEV_PATH_BR_VLAN_TAG,
				DEV_PATH_BR_VLAN_UNTAG,
				DEV_PATH_BR_VLAN_UNTAG_HW,
			}		vlan_mode;
			u16		vlan_id;
			__be16		vlan_proto;
		} bridge;
		struct {
			int		port;
			u16		proto;
		} dsa;
		struct {
			u8		wdma_idx;
			u8		queue;
			u16		wcid;
			u8		bss;
			u8		amsdu;
		} mtk_wdma;
	};
};

#define NET_DEVICE_PATH_STACK_MAX	5
#define NET_DEVICE_PATH_VLAN_MAX	2

struct net_device_path_stack {
	int			num_paths;
	struct net_device_path	path[NET_DEVICE_PATH_STACK_MAX];
};

struct net_device_path_ctx {
	const struct net_device *dev;
	u8			daddr[ETH_ALEN];

	int			num_vlans;
	struct {
		u16		id;
		__be16		proto;
	} vlan[NET_DEVICE_PATH_VLAN_MAX];
};

enum tc_setup_type {
	TC_QUERY_CAPS,
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
	TC_SETUP_QDISC_HTB,
	TC_SETUP_ACT,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
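
/*
 * Illustrative sketch (mydrv_* hypothetical): a driver's ndo_bpf() switches
 * on the command and consumes the matching union member:
 *
 *	static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return mydrv_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return mydrv_xsk_setup(dev, bpf->xsk.pool,
 *					       bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */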

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add)(struct net_device *dev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack);
	void	(*xdo_dev_state_delete)(struct net_device *dev,
					struct xfrm_state *x);
	void	(*xdo_dev_state_free)(struct net_device *dev,
				      struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok)(struct sk_buff *skb,
				      struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn)(struct xfrm_state *x);
	void	(*xdo_dev_state_update_stats)(struct xfrm_state *x);
	int	(*xdo_dev_policy_add)(struct xfrm_policy *x,
				      struct netlink_ext_ack *extack);
	void	(*xdo_dev_policy_delete)(struct xfrm_policy *x);
	void	(*xdo_dev_policy_free)(struct xfrm_policy *x);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow the device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device's address list filtering
 *	changes. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *	This function is called when the Media Access Control address
 *	needs to be changed. If this interface is not defined, the
 *	MAC address can not be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *	Test if Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Old-style ioctl entry point. This is used internally by the
 *	ieee802154 subsystem but is no longer called by the device
 *	ioctl handler.
 *
 * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Used by the bonding driver for its device specific ioctls:
 *	SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
 *	SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
 *
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *	Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
 *	SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *	Used to set a network device's bus interface parameters. This interface
 *	is retained for legacy reasons; new devices should use the bus
 *	interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transmission Unit (MTU)
 *	of a device.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
 *
 * void (*ndo_get_stats64)(struct net_device *dev,
 *                         struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *	Called when a user wants to get the network device usage
 *	statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to fill in a zero-initialised
 *	   rtnl_link_stats64 structure passed by the caller.
 *	2. Define @ndo_get_stats to update a net_device_stats structure
 *	   (which should normally be dev->stats) and return a pointer to
 *	   it. The structure may be changed asynchronously only if each
 *	   field is written atomically.
 *	3. Update dev->stats asynchronously and atomically, and define
 *	   neither operation.
 *
 * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
 *	Return true if this device supports offload stats of this attr_id.
 *
 * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
 *	void *attr_data)
 *	Get statistics for offload operations by attr_id. Write it into the
 *	attr_data pointer.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 *	If device supports VLAN filtering this function is called when a
 *	VLAN id is unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *	SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
 *			  u8 qos, __be16 proto);
 * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
 *			  int max_tx_rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *			    int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *			  struct nlattr *port[]);
 *
 *	Enable or disable the VF ability to query its RSS Redirection Table and
 *	Hash Key. This is needed since on some devices VFs share this information
 *	with the PF and querying it may introduce a theoretical security risk.
 * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
 *		       void *type_data);
 *	Called to setup any 'tc' scheduler, classifier or action on @dev.
 *	This is always called from the stack with the rtnl lock held and netif
 *	tx queues stopped. This allows the netdevice to perform queue
 *	management safely.
 *
 *	Fiber Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to start using LLD for FCoE
 *	so the underlying device can perform whatever needed configuration or
 *	initialization to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *	Called when the FCoE protocol stack wants to stop using LLD for FCoE
 *	so the underlying device can perform whatever needed clean-ups to
 *	stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *			     struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Initiator wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform the necessary setup and return 1 to indicate the device is set
 *	up successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *	Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *	indicated by the FC exchange id 'xid', so the underlying device can
 *	clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *			      struct scatterlist *sgl, unsigned int sgc);
 *	Called when the FCoE Target wants to initialize an I/O that
 *	is a possible candidate for Direct Data Placement (DDP). The LLD can
 *	perform the necessary setup and return 1 to indicate the device is set
 *	up successfully to perform DDP on this I/O, otherwise this returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *			       struct netdev_fcoe_hbainfo *hbainfo);
 *	Called when the FCoE Protocol stack wants information on the underlying
 *	device. This information is utilized by the FCoE protocol stack to
 *	register attributes with Fiber Channel management service as per the
 *	FC-GS Fabric Device Management Information(FDMI) specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *	Called when the underlying device wants to override default World Wide
 *	Name (WWN) generation mechanism in FCoE protocol stack to pass its own
 *	World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
 *	protocol stack to use.
 *
 *	RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *			    u16 rxq_index, u32 flow_id);
 *	Set hardware filter for RFS. rxq_index is the target queue index;
 *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *	Return the filter ID on success, or a negative error code.
 *
 *	Slave management functions (for bridge, bonding, etc).
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *	Called to release previously enslaved netdev.
 *
 * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
 *					    struct sk_buff *skb,
 *					    bool all_slaves);
 *	Get the xmit slave of master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
 *
 *	Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *					 netdev_features_t features);
 *	Adjusts the requested feature flags according to device-specific
 *	constraints, and returns the resulting flags. Must not modify
 *	the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *	Called to update device configuration to new features. Passed
 *	feature set might be less than what was returned by ndo_fix_features().
 *	Must return >0 or -errno if it changed dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid, u16 flags,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Adds an FDB entry to dev for addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
 *		      struct net_device *dev,
 *		      const unsigned char *addr, u16 vid,
 *		      bool *notified, struct netlink_ext_ack *extack);
 *	Deletes the FDB entry from dev corresponding to addr.
 *	Callee shall set *notified to true if it sent any appropriate
 *	notification(s). Otherwise core will send a generic one.
 * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
 *			   struct netlink_ext_ack *extack);
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *		       struct net_device *dev, struct net_device *filter_dev,
 *		       int *idx)
 *	Used to add FDB entries to dump requests. Implementers should add
 *	entries to skb and update idx with the number of entries.
 *
 * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
 *		      u16 nlmsg_flags, struct netlink_ext_ack *extack);
 *	Adds an MDB entry to dev.
 * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
 *		      struct netlink_ext_ack *extack);
 *	Deletes the MDB entry from dev.
 * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
 *			   struct netlink_ext_ack *extack);
 *	Bulk deletes MDB entries from dev.
 * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
 *		       struct netlink_callback *cb);
 *	Dumps MDB entries from dev. The first argument (marker) in the netlink
 *	callback is used by core rtnetlink code.
 *
 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags, struct netlink_ext_ack *extack)
 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
 *			     struct net_device *dev, u32 filter_mask,
 *			     int nlflags)
 * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
 *			     u16 flags);
 *
 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
 *	Called to change device carrier. Soft-devices (like dummy, team, etc)
 *	which do not represent real hardware may define this to allow their
 *	userspace components to manage their virtual carrier state. Devices
 *	that determine carrier state from physical hardware properties (eg
 *	network cables) or protocol-dependent mechanisms (eg
 *	USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
 *
 * int (*ndo_get_phys_port_id)(struct net_device *dev,
 *			       struct netdev_phys_item_id *ppid);
 *	Called to get ID of physical port of this device. If driver does
 *	not implement this, it is assumed that the hw is not able to have
 *	multiple net devices on a single physical port.
 *
 * int (*ndo_get_port_parent_id)(struct net_device *dev,
 *				 struct netdev_phys_item_id *ppid)
 *	Called to get the parent ID of the physical port of this device.
 *
 * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
 *				 struct net_device *dev)
 *	Called by upper layer devices to accelerate switching or other
 *	station functionality into hardware. 'pdev' is the lowerdev
 *	to use for the offload and 'dev' is the net device that will
 *	back the offload. Returns a pointer to the private structure
 *	the upper layer will maintain.
 * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
 *	Called by upper layer device to delete the station created
 *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
 *	the station and priv is the structure returned by the add
 *	operation.
 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
 *			     int queue_index, u32 maxrate);
 *	Called when a user wants to set a max-rate limitation of specific
 *	TX queue.
 * int (*ndo_get_iflink)(const struct net_device *dev);
 *	Called to get the iflink value of this device.
 * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
 *	This function is used to get egress tunnel information for given skb.
 *	This is useful for retrieving outer tunnel header parameters while
 *	sampling packets.
 * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
 *	This function is used to specify the headroom that the skb must
 *	consider when allocating an skb during packet reception. Setting an
 *	appropriate rx headroom value allows avoiding skb head copy on
 *	forward. Setting a negative value resets the rx headroom to the
 *	default value.
 * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
 *	This function is used to set or query state related to XDP on the
 *	netdevice and manage BPF offload. See definition of
 *	enum bpf_netdev_command for details.
 * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
 *			u32 flags);
 *	This function is used to submit @n XDP packets for transmit on a
 *	netdevice. Returns number of frames successfully transmitted, frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error invoking the ndo; no
 *	frames were transmitted and the core caller will free all frames.
 * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
 *					        struct xdp_buff *xdp);
 *	Get the xmit slave of master device based on the xdp_buff.
 * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
 *	This function is used to wake up the softirq, ksoftirqd or kthread
 *	responsible for sending and/or receiving packets on a specific
 *	queue id bound to an AF_XDP socket. The flags field specifies if
 *	only RX, only Tx, or both should be woken up using the flags
 *	XDP_WAKEUP_RX and XDP_WAKEUP_TX.
 * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
 *			 int cmd);
 *	Add, change, delete or get information on an IPv4 tunnel.
 * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
 *	If a device is paired with a peer device, return the peer instance.
 *	The caller must be under RCU read context.
 * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
 *	Get the forwarding path to reach the real device from the HW destination address
 * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
 *			     const struct skb_shared_hwtstamps *hwtstamps,
 *			     bool cycles);
 *	Get hardware timestamp based on normal/adjustable time or free running
 *	cycle counter. This function is required if physical clock supports a
 *	free running cycle counter.
 *
 * int (*ndo_hwtstamp_get)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config);
 *	Get the currently configured hardware timestamping parameters for the
 *	NIC device.
 *
 * int (*ndo_hwtstamp_set)(struct net_device *dev,
 *			   struct kernel_hwtstamp_config *kernel_config,
 *			   struct netlink_ext_ack *extack);
 *	Change the hardware timestamping parameters for NIC device.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit)(struct sk_buff *skb,
						  struct net_device *dev);
	netdev_features_t	(*ndo_features_check)(struct sk_buff *skb,
						      struct net_device *dev,
						      netdev_features_t features);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb,
						    struct net_device *sb_dev);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_eth_ioctl)(struct net_device *dev,
						 struct ifreq *ifr, int cmd);
	int			(*ndo_siocbond)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_siocwandev)(struct net_device *dev,
						  struct if_settings *ifs);
	int			(*ndo_siocdevprivate)(struct net_device *dev,
						      struct ifreq *ifr,
						      void __user *data, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout)(struct net_device *dev,
						  unsigned int txqueue);

	void			(*ndo_get_stats64)(struct net_device *dev,
						   struct rtnl_link_stats64 *storage);
	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
	int			(*ndo_get_offload_stats)(int attr_id,
							 const struct net_device *dev,
							 void *attr_data);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

	int			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       __be16 proto, u16 vid);
	int			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan,
						   u8 qos, __be16 proto);
	int			(*ndo_set_vf_rate)(struct net_device *dev,
						   int vf, int min_tx_rate,
						   int max_tx_rate);
	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
						       int vf, bool setting);
	int			(*ndo_set_vf_trust)(struct net_device *dev,
						    int vf, bool setting);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_link_state)(struct net_device *dev,
							 int vf, int link_state);
	int			(*ndo_get_vf_stats)(struct net_device *dev,
						    int vf,
						    struct ifla_vf_stats *vf_stats);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
	int			(*ndo_get_vf_guid)(struct net_device *dev,
						   int vf,
						   struct ifla_vf_guid *node_guid,
						   struct ifla_vf_guid *port_guid);
	int			(*ndo_set_vf_guid)(struct net_device *dev,
						   int vf, u64 guid,
						   int guid_type);
	int			(*ndo_set_vf_rss_query_en)(struct net_device *dev,
							   int vf, bool setting);
	int			(*ndo_setup_tc)(struct net_device *dev,
						enum tc_setup_type type,
						void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
	int			(*ndo_fcoe_ddp_target)(struct net_device *dev,
						       u16 xid,
						       struct scatterlist *sgl,
						       unsigned int sgc);
	int			(*ndo_fcoe_get_hbainfo)(struct net_device *dev,
							struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
	int			(*ndo_rx_flow_steer)(struct net_device *dev,
						     const struct sk_buff *skb,
						     u16 rxq_index,
						     u32 flow_id);
#endif
	int			(*ndo_add_slave)(struct net_device *dev,
						 struct net_device *slave_dev,
						 struct netlink_ext_ack *extack);
	int			(*ndo_del_slave)(struct net_device *dev,
						 struct net_device *slave_dev);
	struct net_device*	(*ndo_get_xmit_slave)(struct net_device *dev,
						      struct sk_buff *skb,
						      bool all_slaves);
	struct net_device*	(*ndo_sk_get_lower_dev)(struct net_device *dev,
							struct sock *sk);
	netdev_features_t	(*ndo_fix_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_set_features)(struct net_device *dev,
						    netdev_features_t features);
	int			(*ndo_neigh_construct)(struct net_device *dev,
						       struct neighbour *n);
	void			(*ndo_neigh_destroy)(struct net_device *dev,
						     struct neighbour *n);

	int			(*ndo_fdb_add)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       u16 flags,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del)(struct ndmsg *ndm,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid,
					       bool *notified,
					       struct netlink_ext_ack *extack);
	int			(*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
						    struct net_device *dev,
						    struct netlink_ext_ack *extack);
	int			(*ndo_fdb_dump)(struct sk_buff *skb,
						struct netlink_callback *cb,
						struct net_device *dev,
						struct net_device *filter_dev,
						int *idx);
	int			(*ndo_fdb_get)(struct sk_buff *skb,
					       struct nlattr *tb[],
					       struct net_device *dev,
					       const unsigned char *addr,
					       u16 vid, u32 portid, u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_add)(struct net_device *dev,
					       struct nlattr *tb[],
					       u16 nlmsg_flags,
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del)(struct net_device *dev,
					       struct nlattr *tb[],
					       struct netlink_ext_ack *extack);
	int			(*ndo_mdb_del_bulk)(struct net_device *dev,
						    struct nlattr *tb[],
						    struct netlink_ext_ack *extack);
	int			(*ndo_mdb_dump)(struct net_device *dev,
						struct sk_buff *skb,
						struct netlink_callback *cb);
	int			(*ndo_mdb_get)(struct net_device *dev,
					       struct nlattr *tb[], u32 portid,
					       u32 seq,
					       struct netlink_ext_ack *extack);
	int			(*ndo_bridge_setlink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags,
						      struct netlink_ext_ack *extack);
	int			(*ndo_bridge_getlink)(struct sk_buff *skb,
						      u32 pid, u32 seq,
						      struct net_device *dev,
						      u32 filter_mask,
						      int nlflags);
	int			(*ndo_bridge_dellink)(struct net_device *dev,
						      struct nlmsghdr *nlh,
						      u16 flags);
	int			(*ndo_change_carrier)(struct net_device *dev,
						      bool new_carrier);
	int			(*ndo_get_phys_port_id)(struct net_device *dev,
							struct netdev_phys_item_id *ppid);
	int			(*ndo_get_port_parent_id)(struct net_device *dev,
							  struct netdev_phys_item_id *ppid);
	int			(*ndo_get_phys_port_name)(struct net_device *dev,
							  char *name, size_t len);
	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
							struct net_device *dev);
	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
							void *priv);

	int			(*ndo_set_tx_maxrate)(struct net_device *dev,
						      int queue_index,
						      u32 maxrate);
	int			(*ndo_get_iflink)(const struct net_device *dev);
	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
							 struct sk_buff *skb);
	void			(*ndo_set_rx_headroom)(struct net_device *dev,
1648 int needed_headroom);
1649 int (*ndo_bpf)(struct net_device *dev,
1650 struct netdev_bpf *bpf);
1651 int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1652 struct xdp_frame **xdp,
1653 u32 flags);
1654 struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
1655 struct xdp_buff *xdp);
1656 int (*ndo_xsk_wakeup)(struct net_device *dev,
1657 u32 queue_id, u32 flags);
1658 int (*ndo_tunnel_ctl)(struct net_device *dev,
1659 struct ip_tunnel_parm_kern *p,
1660 int cmd);
1661 struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
1662 int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
1663 struct net_device_path *path);
1664 ktime_t (*ndo_get_tstamp)(struct net_device *dev,
1665 const struct skb_shared_hwtstamps *hwtstamps,
1666 bool cycles);
1667 int (*ndo_hwtstamp_get)(struct net_device *dev,
1668 struct kernel_hwtstamp_config *kernel_config);
1669 int (*ndo_hwtstamp_set)(struct net_device *dev,
1670 struct kernel_hwtstamp_config *kernel_config,
1671 struct netlink_ext_ack *extack);
1672
1673 #if IS_ENABLED(CONFIG_NET_SHAPER)
1674 /**
1675 * @net_shaper_ops: Device shaping offload operations
1676 * see include/net/net_shapers.h
1677 */
1678 const struct net_shaper_ops *net_shaper_ops;
1679 #endif
1680 };
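/*
 * Illustrative sketch only (not part of this header): a hypothetical "foo"
 * driver would typically provide a mostly-sparse ops table. Only
 * .ndo_start_xmit is mandatory; unused callbacks stay NULL. foo_open,
 * foo_stop and foo_start_xmit are assumed driver functions;
 * dev_get_tstats64() is the generic per-CPU tstats helper.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open	 = foo_open,
 *		.ndo_stop	 = foo_stop,
 *		.ndo_start_xmit	 = foo_start_xmit,
 *		.ndo_get_stats64 = dev_get_tstats64,
 *	};
 *
 * The table is then assigned once, before registration:
 *
 *	dev->netdev_ops = &foo_netdev_ops;
 */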
1681
1682 /**
1683 * enum netdev_priv_flags - &struct net_device priv_flags
1684 *
1685 * These are the &struct net_device flags; they are only set internally
1686 * by drivers and used in the kernel. These flags are invisible to
1687 * userspace; this means that the order of these flags can change
1688 * during any kernel release.
1689 *
1690 * You should add bitfield booleans after either net_device::priv_flags
1691 * (hotpath) or ::threaded (slowpath) instead of extending these flags.
1692 *
1693 * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1694 * @IFF_EBRIDGE: Ethernet bridging device
1695 * @IFF_BONDING: bonding master or slave
1696 * @IFF_ISATAP: ISATAP interface (RFC4214)
1697 * @IFF_WAN_HDLC: WAN HDLC device
1698 * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1699 * release skb->dst
1700 * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1701 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1702 * @IFF_MACVLAN_PORT: device used as macvlan port
1703 * @IFF_BRIDGE_PORT: device used as bridge port
1704 * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1705 * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1706 * @IFF_UNICAST_FLT: Supports unicast filtering
1707 * @IFF_TEAM_PORT: device used as team port
1708 * @IFF_SUPP_NOFCS: device supports sending custom FCS
1709 * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1710 * change when it's running
1711 * @IFF_MACVLAN: Macvlan device
1712 * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1713 * underlying stacked devices
1714 * @IFF_L3MDEV_MASTER: device is an L3 master device
1715 * @IFF_NO_QUEUE: device can run without qdisc attached
1716 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1717 * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1718 * @IFF_TEAM: device is a team device
1719 * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1720 * entity (i.e. the master device for bridged veth)
1721 * @IFF_MACSEC: device is a MACsec device
1722 * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1723 * @IFF_FAILOVER: device is a failover master device
1724 * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1725 * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1726 * @IFF_NO_ADDRCONF: prevent ipv6 addrconf
1727 * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
1728 * skb_headlen(skb) == 0 (data starts from frag0)
1729 */
1730 enum netdev_priv_flags {
1731 IFF_802_1Q_VLAN = 1<<0,
1732 IFF_EBRIDGE = 1<<1,
1733 IFF_BONDING = 1<<2,
1734 IFF_ISATAP = 1<<3,
1735 IFF_WAN_HDLC = 1<<4,
1736 IFF_XMIT_DST_RELEASE = 1<<5,
1737 IFF_DONT_BRIDGE = 1<<6,
1738 IFF_DISABLE_NETPOLL = 1<<7,
1739 IFF_MACVLAN_PORT = 1<<8,
1740 IFF_BRIDGE_PORT = 1<<9,
1741 IFF_OVS_DATAPATH = 1<<10,
1742 IFF_TX_SKB_SHARING = 1<<11,
1743 IFF_UNICAST_FLT = 1<<12,
1744 IFF_TEAM_PORT = 1<<13,
1745 IFF_SUPP_NOFCS = 1<<14,
1746 IFF_LIVE_ADDR_CHANGE = 1<<15,
1747 IFF_MACVLAN = 1<<16,
1748 IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1749 IFF_L3MDEV_MASTER = 1<<18,
1750 IFF_NO_QUEUE = 1<<19,
1751 IFF_OPENVSWITCH = 1<<20,
1752 IFF_L3MDEV_SLAVE = 1<<21,
1753 IFF_TEAM = 1<<22,
1754 IFF_PHONY_HEADROOM = 1<<24,
1755 IFF_MACSEC = 1<<25,
1756 IFF_NO_RX_HANDLER = 1<<26,
1757 IFF_FAILOVER = 1<<27,
1758 IFF_FAILOVER_SLAVE = 1<<28,
1759 IFF_L3MDEV_RX_HANDLER = 1<<29,
1760 IFF_NO_ADDRCONF = BIT_ULL(30),
1761 IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
1762 };
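/*
 * Illustrative sketch only: priv_flags is queried with plain bitwise tests,
 * usually wrapped in a small helper (this header defines many such
 * netif_is_*() accessors further down). "foo" is a hypothetical example:
 *
 *	static inline bool foo_is_bridge_port(const struct net_device *dev)
 *	{
 *		return dev->priv_flags & IFF_BRIDGE_PORT;
 *	}
 */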
1763
1764 /* Specifies the type of the struct net_device::ml_priv pointer */
1765 enum netdev_ml_priv_type {
1766 ML_PRIV_NONE,
1767 ML_PRIV_CAN,
1768 };
1769
1770 enum netdev_stat_type {
1771 NETDEV_PCPU_STAT_NONE,
1772 NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
1773 NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
1774 NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
1775 };
1776
1777 enum netdev_reg_state {
1778 NETREG_UNINITIALIZED = 0,
1779 NETREG_REGISTERED, /* completed register_netdevice */
1780 NETREG_UNREGISTERING, /* called unregister_netdevice */
1781 NETREG_UNREGISTERED, /* completed unregister todo */
1782 NETREG_RELEASED, /* called free_netdev */
1783 NETREG_DUMMY, /* dummy device for NAPI poll */
1784 };
1785
1786 /**
1787 * struct net_device - The DEVICE structure.
1788 *
1789 * Actually, this whole structure is a big mistake. It mixes I/O
1790 * data with strictly "high-level" data, and it has to know about
1791 * almost every data structure used in the INET module.
1792 *
1793 * @priv_flags: flags invisible to userspace defined as bits, see
1794 * enum netdev_priv_flags for the definitions
1795 * @lltx: device supports lockless Tx. Deprecated for real HW
1796 * drivers. Mainly used by logical interfaces, such as
1797 * bonding and tunnels
1798 * @netmem_tx: device support netmem_tx.
1799 *
1800 * @name: This is the first field of the "visible" part of this structure
1801 * (i.e. as seen by users in the "Space.c" file). It is the name
1802 * of the interface.
1803 *
1804 * @name_node: Name hashlist node
1805 * @ifalias: SNMP alias
1806 * @mem_end: Shared memory end
1807 * @mem_start: Shared memory start
1808 * @base_addr: Device I/O address
1809 * @irq: Device IRQ number
1810 *
1811 * @state: Generic network queuing layer state, see netdev_state_t
1812 * @dev_list: The global list of network devices
1813 * @napi_list: List entry used for polling NAPI devices
1814 * @unreg_list: List entry when we are unregistering the
1815 * device; see the function unregister_netdev
1816 * @close_list: List entry used when we are closing the device
1817 * @ptype_all: Device-specific packet handlers for all protocols
1818 * @ptype_specific: Device-specific, protocol-specific packet handlers
1819 *
1820 * @adj_list: Directly linked devices, like slaves for bonding
1821 * @features: Currently active device features
1822 * @hw_features: User-changeable features
1823 *
1824 * @wanted_features: User-requested features
1825 * @vlan_features: Mask of features inheritable by VLAN devices
1826 *
1827 * @hw_enc_features: Mask of features inherited by encapsulating devices
1828 * This field indicates what encapsulation
1829 * offloads the hardware is capable of doing,
1830 * and drivers will need to set them appropriately.
1831 *
1832 * @mpls_features: Mask of features inheritable by MPLS
1833 * @gso_partial_features: value(s) from NETIF_F_GSO\*
1834 * @mangleid_features: Mask of features requiring MANGLEID, will be
1835 * disabled together with the latter.
1836 *
1837 * @ifindex: interface index
1838 * @group: The group the device belongs to
1839 *
1840 * @stats: Statistics struct, which was left as a legacy, use
1841 * rtnl_link_stats64 instead
1842 *
1843 * @core_stats: core networking counters,
1844 * do not use this in drivers
1845 * @carrier_up_count: Number of times the carrier has been up
1846 * @carrier_down_count: Number of times the carrier has been down
1847 *
1848 * @wireless_handlers: List of functions to handle Wireless Extensions,
1849 * instead of ioctl,
1850 * see <net/iw_handler.h> for details.
1851 *
1852 * @netdev_ops: Includes several pointers to callbacks,
1853 * if one wants to override the ndo_*() functions
1854 * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
1855 * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
1856 * @ethtool_ops: Management operations
1857 * @l3mdev_ops: Layer 3 master device operations
1858 * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1859 * discovery handling. Necessary for e.g. 6LoWPAN.
1860 * @xfrmdev_ops: Transformation offload operations
1861 * @tlsdev_ops: Transport Layer Security offload operations
1862 * @header_ops: Includes callbacks for creating, parsing, caching, etc.
1863 * of Layer 2 headers.
1864 *
1865 * @flags: Interface flags (a la BSD)
1866 * @xdp_features: XDP capability supported by the device
1867 * @gflags: Global flags (kept as legacy)
1868 * @priv_len: Size of the ->priv flexible array
1869 * @priv: Flexible array containing private data
1870 * @operstate: RFC2863 operstate
1871 * @link_mode: Mapping policy to operstate
1872 * @if_port: Selectable AUI, TP, ...
1873 * @dma: DMA channel
1874 * @mtu: Interface MTU value
1875 * @min_mtu: Interface Minimum MTU value
1876 * @max_mtu: Interface Maximum MTU value
1877 * @type: Interface hardware type
1878 * @hard_header_len: Maximum hardware header length.
1879 * @min_header_len: Minimum hardware header length
1880 *
1881 * @needed_headroom: Extra headroom the hardware may need, but not in all
1882 * cases can this be guaranteed
1883 * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1884 * cases can this be guaranteed. Some cases also use
1885 * LL_MAX_HEADER instead to allocate the skb
1886 *
1887 * interface address info:
1888 *
1889 * @perm_addr: Permanent hw address
1890 * @addr_assign_type: Hw address assignment type
1891 * @addr_len: Hardware address length
1892 * @upper_level: Maximum depth level of upper devices.
1893 * @lower_level: Maximum depth level of lower devices.
1894 * @threaded: napi threaded state.
1895 * @neigh_priv_len: Used in neigh_alloc()
1896 * @dev_id: Used to differentiate devices that share
1897 * the same link layer address
1898 * @dev_port: Used to differentiate devices that share
1899 * the same function
1900 * @addr_list_lock: XXX: need comments on this one
1901 * @name_assign_type: network interface name assignment type
1902 * @uc_promisc: Flag that indicates promiscuous mode
1903 * has been enabled due to the need to listen to
1904 * additional unicast addresses in a device that
1905 * does not implement ndo_set_rx_mode()
1906 * @uc: unicast mac addresses
1907 * @mc: multicast mac addresses
1908 * @dev_addrs: list of device hw addresses
1909 * @queues_kset: Group of all Kobjects in the Tx and RX queues
1910 * @promiscuity: Number of times the NIC is told to work in
1911 * promiscuous mode; if it becomes 0 the NIC will
1912 * exit promiscuous mode
1913 * @allmulti: Counter, enables or disables allmulticast mode
1914 *
1915 * @vlan_info: VLAN info
1916 * @dsa_ptr: dsa specific data
1917 * @tipc_ptr: TIPC specific data
1918 * @atalk_ptr: AppleTalk link
1919 * @ip_ptr: IPv4 specific data
1920 * @ip6_ptr: IPv6 specific data
1921 * @ax25_ptr: AX.25 specific data
1922 * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1923 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1924 * device struct
1925 * @mpls_ptr: mpls_dev struct pointer
1926 * @mctp_ptr: MCTP specific data
1927 * @psp_dev: PSP crypto device registered for this netdev
1928 *
1929 * @dev_addr: Hw address (before bcast,
1930 * because most packets are unicast)
1931 *
1932 * @_rx: Array of RX queues
1933 * @num_rx_queues: Number of RX queues
1934 * allocated at register_netdev() time
1935 * @real_num_rx_queues: Number of RX queues currently active in device
1936 * @xdp_prog: XDP sockets filter program pointer
1937 *
1938 * @rx_handler: handler for received packets
1939 * @rx_handler_data: XXX: need comments on this one
1940 * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
1941 * @ingress_queue: XXX: need comments on this one
1942 * @nf_hooks_ingress: netfilter hooks executed for ingress packets
1943 * @broadcast: hw bcast address
1944 *
1945 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1946 * indexed by RX queue number. Assigned by driver.
1947 * This must only be set if the ndo_rx_flow_steer
1948 * operation is defined
1949 * @index_hlist: Device index hash chain
1950 *
1951 * @_tx: Array of TX queues
1952 * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1953 * @real_num_tx_queues: Number of TX queues currently active in device
1954 * @qdisc: Root qdisc from userspace point of view
1955 * @tx_queue_len: Max frames per queue allowed
1956 * @tx_global_lock: XXX: need comments on this one
1957 * @xdp_bulkq: XDP device bulk queue
1958 * @xps_maps: all CPUs/RXQs maps for XPS device
1959 *
1961 * @tcx_egress: BPF & clsact qdisc specific data for egress processing
1962 * @nf_hooks_egress: netfilter hooks executed for egress packets
1963 * @qdisc_hash: qdisc hash table
1964 * @watchdog_timeo: Represents the timeout that is used by
1965 * the watchdog (see dev_watchdog())
1966 * @watchdog_timer: List of timers
1967 *
1968 * @proto_down_reason: reason a netdev interface is held down
1969 * @pcpu_refcnt: Number of references to this device
1970 * @dev_refcnt: Number of references to this device
1971 * @refcnt_tracker: Tracker directory for tracked references to this device
1972 * @todo_list: Delayed register/unregister
1973 * @link_watch_list: XXX: need comments on this one
1974 *
1975 * @reg_state: Register/unregister state machine
1976 * @dismantle: Device is going to be freed
1977 * @needs_free_netdev: Should unregister perform free_netdev?
1978 * @priv_destructor: Called from unregister
1979 * @npinfo: XXX: need comments on this one
1980 * @nd_net: Network namespace this network device is inside
1981 * protected by @lock
1982 *
1983 * @ml_priv: Mid-layer private
1984 * @ml_priv_type: Mid-layer private type
1985 *
1986 * @pcpu_stat_type: Type of device statistics which the core should
1987 * allocate/free: none, lstats, tstats, dstats. none
1988 * means the driver is handling statistics allocation/
1989 * freeing internally.
1990 * @lstats: Loopback statistics: packets, bytes
1991 * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
1992 * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
1993 *
1994 * @garp_port: GARP
1995 * @mrp_port: MRP
1996 *
1997 * @dm_private: Drop monitor private
1998 *
1999 * @dev: Class/net/name entry
2000 * @sysfs_groups: Space for optional device, statistics and wireless
2001 * sysfs groups
2002 *
2003 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
2004 * @rtnl_link_ops: Rtnl_link_ops
2005 * @stat_ops: Optional ops for queue-aware statistics
2006 * @queue_mgmt_ops: Optional ops for queue management
2007 *
2008 * @gso_max_size: Maximum size of generic segmentation offload
2009 * @tso_max_size: Device (as in HW) limit on the max TSO request size
2010 * @gso_max_segs: Maximum number of segments that can be passed to the
2011 * NIC for GSO
2012 * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
2013 * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
2014 * for IPv4.
2015 *
2016 * @dcbnl_ops: Data Center Bridging netlink ops
2017 * @num_tc: Number of traffic classes in the net device
2018 * @tc_to_txq: XXX: need comments on this one
2019 * @prio_tc_map: XXX: need comments on this one
2020 *
2021 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
2022 *
2023 * @priomap: XXX: need comments on this one
2024 * @link_topo: Physical link topology tracking attached PHYs
2025 * @phydev: Physical device may attach itself
2026 * for hardware timestamping
2027 * @sfp_bus: attached &struct sfp_bus structure.
2028 *
2029 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
2030 *
2031 * @proto_down: protocol port state information can be sent to the
2032 * switch driver and used to set the phys state of the
2033 * switch port.
2034 *
2035 * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ
2036 * affinity. Set by netif_enable_irq_affinity(), then
2037 * the driver must create a persistent napi by
2038 * netif_napi_add_config() and finally bind the napi to
2039 * IRQ (via netif_napi_set_irq()).
2040 *
2041 * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
2042 * Set by calling netif_enable_cpu_rmap().
2043 *
2044 * @see_all_hwtstamp_requests: device wants to see calls to
2045 * ndo_hwtstamp_set() for all timestamp requests
2046 * regardless of source, even if those aren't
2047 * HWTSTAMP_SOURCE_NETDEV
2048 * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
2049 * @netns_immutable: interface can't change network namespaces
2050 * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
2051 *
2052 * @net_notifier_list: List of per-net netdev notifier block
2053 * that follow this device when it is moved
2054 * to another network namespace.
2055 *
2056 * @macsec_ops: MACsec offloading ops
2057 *
2058 * @udp_tunnel_nic_info: static structure describing the UDP tunnel
2059 * offload capabilities of the device
2060 * @udp_tunnel_nic: UDP tunnel offload state
2061 * @ethtool: ethtool related state
2062 * @xdp_state: stores info on attached XDP BPF programs
2063 *
2064 * @nested_level: Used as a parameter of spin_lock_nested() of
2065 * dev->addr_list_lock.
2066 * @unlink_list: As netif_addr_lock() can be called recursively,
2067 * keep a list of interfaces to be deleted.
2068 * @gro_max_size: Maximum size of aggregated packet in generic
2069 * receive offload (GRO)
2070 * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
2071 * receive offload (GRO), for IPv4.
2072 * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
2073 * zero copy driver
2074 *
2075 * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
2076 * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
2077 * @watchdog_dev_tracker: refcount tracker used by watchdog.
2078 * @dev_registered_tracker: tracker for reference held while
2079 * registered
2080 * @offload_xstats_l3: L3 HW stats for this netdevice.
2081 *
2082 * @devlink_port: Pointer to related devlink port structure.
2083 * Assigned by a driver before netdev registration using
2084 * SET_NETDEV_DEVLINK_PORT macro. This pointer is static
2085 * during the time netdevice is registered.
2086 *
2087 * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
2088 * where the clock is recovered.
2089 *
2090 * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
2091 * @napi_config: An array of napi_config structures containing per-NAPI
2092 * settings.
2093 * @num_napi_configs: number of allocated NAPI config structs,
2094 * always >= max(num_rx_queues, num_tx_queues).
2095 * @gro_flush_timeout: timeout for GRO layer in NAPI
2096 * @napi_defer_hard_irqs: If not zero, provides a counter that allows
2097 * avoiding NIC hard IRQs on busy queues.
2098 *
2099 * @neighbours: List heads pointing to this device's neighbours'
2100 * dev_list, one per address-family.
2101 * @hwprov: Tracks which PTP performs hardware packet time stamping.
2102 *
2103 * FIXME: cleanup struct net_device such that network protocol info
2104 * moves out.
2105 */
2106
2107 struct net_device {
2108 /* Cacheline organization can be found documented in
2109 * Documentation/networking/net_cachelines/net_device.rst.
2110 * Please update the document when adding new fields.
2111 */
2112
2113 /* TX read-mostly hotpath */
2114 __cacheline_group_begin(net_device_read_tx);
2115 struct_group(priv_flags_fast,
2116 unsigned long priv_flags:32;
2117 unsigned long lltx:1;
2118 unsigned long netmem_tx:1;
2119 );
2120 const struct net_device_ops *netdev_ops;
2121 const struct header_ops *header_ops;
2122 struct netdev_queue *_tx;
2123 netdev_features_t gso_partial_features;
2124 unsigned int real_num_tx_queues;
2125 unsigned int gso_max_size;
2126 unsigned int gso_ipv4_max_size;
2127 u16 gso_max_segs;
2128 s16 num_tc;
2129 /* Note: dev->mtu is often read without holding a lock.
2130 * Writers usually hold RTNL.
2131 * It is recommended to use READ_ONCE() to annotate the reads,
2132 * and to use WRITE_ONCE() to annotate the writes.
2133 */
2134 unsigned int mtu;
2135 unsigned short needed_headroom;
2136 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2137 #ifdef CONFIG_XPS
2138 struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
2139 #endif
2140 #ifdef CONFIG_NETFILTER_EGRESS
2141 struct nf_hook_entries __rcu *nf_hooks_egress;
2142 #endif
2143 #ifdef CONFIG_NET_XGRESS
2144 struct bpf_mprog_entry __rcu *tcx_egress;
2145 #endif
2146 __cacheline_group_end(net_device_read_tx);
2147
2148 /* TXRX read-mostly hotpath */
2149 __cacheline_group_begin(net_device_read_txrx);
2150 union {
2151 struct pcpu_lstats __percpu *lstats;
2152 struct pcpu_sw_netstats __percpu *tstats;
2153 struct pcpu_dstats __percpu *dstats;
2154 };
2155 unsigned long state;
2156 unsigned int flags;
2157 unsigned short hard_header_len;
2158 enum netdev_stat_type pcpu_stat_type:8;
2159 netdev_features_t features;
2160 struct inet6_dev __rcu *ip6_ptr;
2161 __cacheline_group_end(net_device_read_txrx);
2162
2163 /* RX read-mostly hotpath */
2164 __cacheline_group_begin(net_device_read_rx);
2165 struct bpf_prog __rcu *xdp_prog;
2166 struct list_head ptype_specific;
2167 int ifindex;
2168 unsigned int real_num_rx_queues;
2169 struct netdev_rx_queue *_rx;
2170 unsigned int gro_max_size;
2171 unsigned int gro_ipv4_max_size;
2172 rx_handler_func_t __rcu *rx_handler;
2173 void __rcu *rx_handler_data;
2174 possible_net_t nd_net;
2175 #ifdef CONFIG_NETPOLL
2176 struct netpoll_info __rcu *npinfo;
2177 #endif
2178 #ifdef CONFIG_NET_XGRESS
2179 struct bpf_mprog_entry __rcu *tcx_ingress;
2180 #endif
2181 __cacheline_group_end(net_device_read_rx);
2182
2183 char name[IFNAMSIZ];
2184 struct netdev_name_node *name_node;
2185 struct dev_ifalias __rcu *ifalias;
2186 /*
2187 * I/O specific fields
2188 * FIXME: Merge these and struct ifmap into one
2189 */
2190 unsigned long mem_end;
2191 unsigned long mem_start;
2192 unsigned long base_addr;
2193
2194 /*
2195 * Some hardware also needs these fields (state,dev_list,
2196 * napi_list,unreg_list,close_list) but they are not
2197 * part of the usual set specified in Space.c.
2198 */
2199
2200
2201 struct list_head dev_list;
2202 struct list_head napi_list;
2203 struct list_head unreg_list;
2204 struct list_head close_list;
2205 struct list_head ptype_all;
2206
2207 struct {
2208 struct list_head upper;
2209 struct list_head lower;
2210 } adj_list;
2211
2212 /* Read-mostly cache-line for fast-path access */
2213 xdp_features_t xdp_features;
2214 const struct xdp_metadata_ops *xdp_metadata_ops;
2215 const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
2216 unsigned short gflags;
2217
2218 unsigned short needed_tailroom;
2219
2220 netdev_features_t hw_features;
2221 netdev_features_t wanted_features;
2222 netdev_features_t vlan_features;
2223 netdev_features_t hw_enc_features;
2224 netdev_features_t mpls_features;
2225 netdev_features_t mangleid_features;
2226
2227 unsigned int min_mtu;
2228 unsigned int max_mtu;
2229 unsigned short type;
2230 unsigned char min_header_len;
2231 unsigned char name_assign_type;
2232
2233 int group;
2234
2235 struct net_device_stats stats; /* not used by modern drivers */
2236
2237 struct net_device_core_stats __percpu *core_stats;
2238
2239 /* Stats to monitor link on/off, flapping */
2240 atomic_t carrier_up_count;
2241 atomic_t carrier_down_count;
2242
2243 #ifdef CONFIG_WIRELESS_EXT
2244 const struct iw_handler_def *wireless_handlers;
2245 #endif
2246 const struct ethtool_ops *ethtool_ops;
2247 #ifdef CONFIG_NET_L3_MASTER_DEV
2248 const struct l3mdev_ops *l3mdev_ops;
2249 #endif
2250 #if IS_ENABLED(CONFIG_IPV6)
2251 const struct ndisc_ops *ndisc_ops;
2252 #endif
2253
2254 #ifdef CONFIG_XFRM_OFFLOAD
2255 const struct xfrmdev_ops *xfrmdev_ops;
2256 #endif
2257
2258 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2259 const struct tlsdev_ops *tlsdev_ops;
2260 #endif
2261
2262 unsigned int operstate;
2263 unsigned char link_mode;
2264
2265 unsigned char if_port;
2266 unsigned char dma;
2267
2268 /* Interface address info. */
2269 unsigned char perm_addr[MAX_ADDR_LEN];
2270 unsigned char addr_assign_type;
2271 unsigned char addr_len;
2272 unsigned char upper_level;
2273 unsigned char lower_level;
2274 u8 threaded;
2275
2276 unsigned short neigh_priv_len;
2277 unsigned short dev_id;
2278 unsigned short dev_port;
2279 int irq;
2280 u32 priv_len;
2281
2282 spinlock_t addr_list_lock;
2283
2284 struct netdev_hw_addr_list uc;
2285 struct netdev_hw_addr_list mc;
2286 struct netdev_hw_addr_list dev_addrs;
2287
2288 #ifdef CONFIG_SYSFS
2289 struct kset *queues_kset;
2290 #endif
2291 #ifdef CONFIG_LOCKDEP
2292 struct list_head unlink_list;
2293 #endif
2294 unsigned int promiscuity;
2295 unsigned int allmulti;
2296 bool uc_promisc;
2297 #ifdef CONFIG_LOCKDEP
2298 unsigned char nested_level;
2299 #endif
2300
2301
2302 /* Protocol-specific pointers */
2303 struct in_device __rcu *ip_ptr;
2304 /** @fib_nh_head: nexthops associated with this netdev */
2305 struct hlist_head fib_nh_head;
2306
2307 #if IS_ENABLED(CONFIG_VLAN_8021Q)
2308 struct vlan_info __rcu *vlan_info;
2309 #endif
2310 #if IS_ENABLED(CONFIG_NET_DSA)
2311 struct dsa_port *dsa_ptr;
2312 #endif
2313 #if IS_ENABLED(CONFIG_TIPC)
2314 struct tipc_bearer __rcu *tipc_ptr;
2315 #endif
2316 #if IS_ENABLED(CONFIG_ATALK)
2317 void *atalk_ptr;
2318 #endif
2319 #if IS_ENABLED(CONFIG_AX25)
2320 struct ax25_dev __rcu *ax25_ptr;
2321 #endif
2322 #if IS_ENABLED(CONFIG_CFG80211)
2323 struct wireless_dev *ieee80211_ptr;
2324 #endif
2325 #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
2326 struct wpan_dev *ieee802154_ptr;
2327 #endif
2328 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2329 struct mpls_dev __rcu *mpls_ptr;
2330 #endif
2331 #if IS_ENABLED(CONFIG_MCTP)
2332 struct mctp_dev __rcu *mctp_ptr;
2333 #endif
2334 #if IS_ENABLED(CONFIG_INET_PSP)
2335 struct psp_dev __rcu *psp_dev;
2336 #endif
2337
2338 /*
2339 * Cache lines mostly used on receive path (including eth_type_trans())
2340 */
2341 /* Interface address info used in eth_type_trans() */
2342 const unsigned char *dev_addr;
2343
2344 unsigned int num_rx_queues;
2345 #define GRO_LEGACY_MAX_SIZE 65536u
2346 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2347 * and shinfo->gso_segs is a 16bit field.
2348 */
2349 #define GRO_MAX_SIZE (8 * 65535u)
2350 unsigned int xdp_zc_max_segs;
2351 struct netdev_queue __rcu *ingress_queue;
2352 #ifdef CONFIG_NETFILTER_INGRESS
2353 struct nf_hook_entries __rcu *nf_hooks_ingress;
2354 #endif
2355
2356 unsigned char broadcast[MAX_ADDR_LEN];
2357 #ifdef CONFIG_RFS_ACCEL
2358 struct cpu_rmap *rx_cpu_rmap;
2359 #endif
2360 struct hlist_node index_hlist;
2361
2362 /*
2363 * Cache lines mostly used on transmit path
2364 */
2365 unsigned int num_tx_queues;
2366 struct Qdisc __rcu *qdisc;
2367 unsigned int tx_queue_len;
2368 spinlock_t tx_global_lock;
2369
2370 struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2371
2372 #ifdef CONFIG_NET_SCHED
2373 DECLARE_HASHTABLE (qdisc_hash, 4);
2374 #endif
2375 /* These may be needed for future network-power-down code. */
2376 struct timer_list watchdog_timer;
2377 int watchdog_timeo;
2378
2379 u32 proto_down_reason;
2380
2381 struct list_head todo_list;
2382
2383 #ifdef CONFIG_PCPU_DEV_REFCNT
2384 int __percpu *pcpu_refcnt;
2385 #else
2386 refcount_t dev_refcnt;
2387 #endif
2388 struct ref_tracker_dir refcnt_tracker;
2389
2390 struct list_head link_watch_list;
2391
2392 u8 reg_state;
2393
2394 bool dismantle;
2395
2396 /** @moving_ns: device is changing netns, protected by @lock */
2397 bool moving_ns;
2398 /** @rtnl_link_initializing: Device being created, suppress events */
2399 bool rtnl_link_initializing;
2400
2401 bool needs_free_netdev;
2402 void (*priv_destructor)(struct net_device *dev);
2403
2404 /* mid-layer private */
2405 void *ml_priv;
2406 enum netdev_ml_priv_type ml_priv_type;
2407
2408 #if IS_ENABLED(CONFIG_GARP)
2409 struct garp_port __rcu *garp_port;
2410 #endif
2411 #if IS_ENABLED(CONFIG_MRP)
2412 struct mrp_port __rcu *mrp_port;
2413 #endif
2414 #if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
2415 struct dm_hw_stat_delta __rcu *dm_private;
2416 #endif
2417 struct device dev;
2418 const struct attribute_group *sysfs_groups[5];
2419 const struct attribute_group *sysfs_rx_queue_group;
2420
2421 const struct rtnl_link_ops *rtnl_link_ops;
2422
2423 const struct netdev_stat_ops *stat_ops;
2424
2425 const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
2426
2427 /* for setting kernel sock attribute on TCP connection setup */
2428 #define GSO_MAX_SEGS 65535u
2429 #define GSO_LEGACY_MAX_SIZE 65536u
2430 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
2431 * and shinfo->gso_segs is a 16bit field.
2432 */
2433 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
2434
2435 #define TSO_LEGACY_MAX_SIZE 65536
2436 #define TSO_MAX_SIZE UINT_MAX
2437 unsigned int tso_max_size;
2438 #define TSO_MAX_SEGS U16_MAX
2439 u16 tso_max_segs;
2440
2441 #ifdef CONFIG_DCB
2442 const struct dcbnl_rtnl_ops *dcbnl_ops;
2443 #endif
2444 u8 prio_tc_map[TC_BITMASK + 1];
2445
2446 #if IS_ENABLED(CONFIG_FCOE)
2447 unsigned int fcoe_ddp_xid;
2448 #endif
2449 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2450 struct netprio_map __rcu *priomap;
2451 #endif
2452 struct phy_link_topology *link_topo;
2453 struct phy_device *phydev;
2454 struct sfp_bus *sfp_bus;
2455 struct lock_class_key *qdisc_tx_busylock;
2456 bool proto_down;
2457 bool irq_affinity_auto;
2458 bool rx_cpu_rmap_auto;
2459
2460 /* priv_flags_slow, ungrouped to save space */
2461 unsigned long see_all_hwtstamp_requests:1;
2462 unsigned long change_proto_down:1;
2463 unsigned long netns_immutable:1;
2464 unsigned long fcoe_mtu:1;
2465
2466 struct list_head net_notifier_list;
2467
2468 #if IS_ENABLED(CONFIG_MACSEC)
2469 /* MACsec management functions */
2470 const struct macsec_ops *macsec_ops;
2471 #endif
2472 const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
2473 struct udp_tunnel_nic *udp_tunnel_nic;
2474
2475 /** @cfg: net_device queue-related configuration */
2476 struct netdev_config *cfg;
2477 /**
2478 * @cfg_pending: same as @cfg but when device is being actively
2479 * reconfigured includes any changes to the configuration
2480 * requested by the user, but which may or may not be rejected.
2481 */
2482 struct netdev_config *cfg_pending;
2483 struct ethtool_netdev_state *ethtool;
2484
2485 /* protected by rtnl_lock */
2486 struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
2487
2488 u8 dev_addr_shadow[MAX_ADDR_LEN];
2489 netdevice_tracker linkwatch_dev_tracker;
2490 netdevice_tracker watchdog_dev_tracker;
2491 netdevice_tracker dev_registered_tracker;
2492 struct rtnl_hw_stats64 *offload_xstats_l3;
2493
2494 struct devlink_port *devlink_port;
2495
2496 #if IS_ENABLED(CONFIG_DPLL)
2497 struct dpll_pin __rcu *dpll_pin;
2498 #endif
2499 #if IS_ENABLED(CONFIG_PAGE_POOL)
2500 /** @page_pools: page pools created for this netdevice */
2501 struct hlist_head page_pools;
2502 #endif
2503
2504 /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
2505 struct dim_irq_moder *irq_moder;
2506
2507 u64 max_pacing_offload_horizon;
2508 struct napi_config *napi_config;
2509 u32 num_napi_configs;
2510 u32 napi_defer_hard_irqs;
2511 unsigned long gro_flush_timeout;
2512
2513 /**
2514 * @up: copy of @state's IFF_UP, but safe to read with just @lock.
2515 * May report false negatives while the device is being opened
2516 * or closed (@lock does not protect .ndo_open, or .ndo_close).
2517 */
2518 bool up;
2519
2520 /**
2521 * @request_ops_lock: request the core to run all @netdev_ops and
2522 * @ethtool_ops under the @lock.
2523 */
2524 bool request_ops_lock;
2525
2526 /**
2527 * @lock: netdev-scope lock, protects a small selection of fields.
2528 * Should always be taken using netdev_lock() / netdev_unlock() helpers.
2529 * Drivers are free to use it for other protection.
2530 *
2531 * For the drivers that implement shaper or queue API, the scope
2532 * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
2533 * operations. Drivers may opt-in to this behavior by setting
2534 * @request_ops_lock.
2535 *
2536 * @lock protection mixes with rtnl_lock in multiple ways, fields are
2537 * either:
2538 *
2539 * - simply protected by the instance @lock;
2540 *
2541 * - double protected - writers hold both locks, readers hold either;
2542 *
2543 * - ops protected - protected by the lock held around the NDOs
2544 * and other callbacks, that is the instance lock on devices for
2545 * which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
2546 *
2547 * - double ops protected - always protected by rtnl_lock but for
2548 * devices for which netdev_need_ops_lock() returns true - also
2549 * the instance lock.
2550 *
2551 * Simply protects:
2552 * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
2553 * @net_shaper_hierarchy, @reg_state, @threaded
2554 *
2555 * Double protects:
2556 * @up, @moving_ns, @nd_net, @xdp_features
2557 *
2558 * Double ops protects:
2559 * @real_num_rx_queues, @real_num_tx_queues
2560 *
2561 * Also protects some fields in:
2562 * struct napi_struct, struct netdev_queue, struct netdev_rx_queue
2563 *
2564 * Ordering:
2565 *
2566 * - take after rtnl_lock
2567 *
2568 * - for the case of netdev queue leasing, the netdev-scope lock is
2569 * taken for both the virtual and the physical device; to prevent
2570 * deadlocks, the virtual device's lock must always be acquired
2571 * before the physical device's (see netdev_nl_queue_create_doit)
2572 */
2573 struct mutex lock;
2574
2575 #if IS_ENABLED(CONFIG_NET_SHAPER)
2576 /**
2577 * @net_shaper_hierarchy: data tracking the current shaper status
2578 * see include/net/net_shapers.h
2579 */
2580 struct net_shaper_hierarchy *net_shaper_hierarchy;
2581 #endif
2582
2583 struct hlist_head neighbours[NEIGH_NR_TABLES];
2584
2585 struct hwtstamp_provider __rcu *hwprov;
2586
2587 u8 priv[] ____cacheline_aligned
2588 __counted_by(priv_len);
2589 } ____cacheline_aligned;
2590 #define to_net_dev(d) container_of(d, struct net_device, dev)
2591
2592 /*
2593 * Drivers should use this to assign a devlink port instance to a netdevice
2594 * before it registers the netdevice. Therefore devlink_port is static
2595 * during the netdev lifetime after it is registered.
2596 */
2597 #define SET_NETDEV_DEVLINK_PORT(dev, port) \
2598 ({ \
2599 WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
2600 ((dev)->devlink_port = (port)); \
2601 })
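/*
 * Illustrative sketch only: in a hypothetical probe path, the port must be
 * set before registration so it stays static for the netdev's registered
 * lifetime:
 *
 *	SET_NETDEV_DEVLINK_PORT(netdev, &priv->devlink_port);
 *	err = register_netdevice(netdev);
 */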
2602
2603 static inline bool netif_elide_gro(const struct net_device *dev)
2604 {
2605 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2606 return true;
2607 return false;
2608 }
2609
2610 #define NETDEV_ALIGN 32
2611
2612 static inline
2613 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2614 {
2615 return dev->prio_tc_map[prio & TC_BITMASK];
2616 }
2617
2618 static inline
2619 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2620 {
2621 if (tc >= dev->num_tc)
2622 return -EINVAL;
2623
2624 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2625 return 0;
2626 }
2627
2628 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2629 void netdev_reset_tc(struct net_device *dev);
2630 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2631 int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2632
2633 static inline
2634 int netdev_get_num_tc(struct net_device *dev)
2635 {
2636 return dev->num_tc;
2637 }
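/*
 * Illustrative sketch only: a hypothetical driver exposing two traffic
 * classes over eight TX queues, steering priority 5 into TC 1 (the
 * netdev_set_* helpers are declared nearby in this header):
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC 0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC 1: queues 4-7
 *	netdev_set_prio_tc_map(dev, 5, 1);
 */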
2638
2639 static inline void net_prefetch(void *p)
2640 {
2641 prefetch(p);
2642 #if L1_CACHE_BYTES < 128
2643 prefetch((u8 *)p + L1_CACHE_BYTES);
2644 #endif
2645 }
2646
2647 static inline void net_prefetchw(void *p)
2648 {
2649 prefetchw(p);
2650 #if L1_CACHE_BYTES < 128
2651 prefetchw((u8 *)p + L1_CACHE_BYTES);
2652 #endif
2653 }
2654
2655 void netdev_unbind_sb_channel(struct net_device *dev,
2656 struct net_device *sb_dev);
2657 int netdev_bind_sb_channel_queue(struct net_device *dev,
2658 struct net_device *sb_dev,
2659 u8 tc, u16 count, u16 offset);
2660 int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2661 static inline int netdev_get_sb_channel(struct net_device *dev)
2662 {
2663 return max_t(int, -dev->num_tc, 0);
2664 }
2665
2666 static inline
2667 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2668 unsigned int index)
2669 {
2670 DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
2671 return &dev->_tx[index];
2672 }
2673
2674 static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2675 const struct sk_buff *skb)
2676 {
2677 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2678 }
2679
2680 static inline void netdev_for_each_tx_queue(struct net_device *dev,
2681 void (*f)(struct net_device *,
2682 struct netdev_queue *,
2683 void *),
2684 void *arg)
2685 {
2686 unsigned int i;
2687
2688 for (i = 0; i < dev->num_tx_queues; i++)
2689 f(dev, &dev->_tx[i], arg);
2690 }
2691
2692 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2693 struct net_device *sb_dev);
2694 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2695 struct sk_buff *skb,
2696 struct net_device *sb_dev);
2697
2698 /* returns the headroom that the master device needs to take into account
2699 * when forwarding to this dev
2700 */
2701 static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2702 {
2703 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2704 }
2705
2706 static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2707 {
2708 if (dev->netdev_ops->ndo_set_rx_headroom)
2709 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2710 }
2711
2712 /* set the device rx headroom to the dev's default */
2713 static inline void netdev_reset_rx_headroom(struct net_device *dev)
2714 {
2715 netdev_set_rx_headroom(dev, -1);
2716 }
2717
2718 static inline void *netdev_get_ml_priv(struct net_device *dev,
2719 enum netdev_ml_priv_type type)
2720 {
2721 if (dev->ml_priv_type != type)
2722 return NULL;
2723
2724 return dev->ml_priv;
2725 }
2726
2727 static inline void netdev_set_ml_priv(struct net_device *dev,
2728 void *ml_priv,
2729 enum netdev_ml_priv_type type)
2730 {
2731 WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2732 "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2733 dev->ml_priv_type, type);
2734 WARN(!dev->ml_priv_type && dev->ml_priv,
2735 "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2736
2737 dev->ml_priv = ml_priv;
2738 dev->ml_priv_type = type;
2739 }
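/*
 * Illustrative sketch only: a type-checked round-trip through the mid-layer
 * private pointer (ML_PRIV_CAN is currently the only non-NONE type; "foo"
 * is hypothetical):
 *
 *	netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
 *	...
 *	struct foo *priv = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 *	if (!priv)
 *		return;		// dev is not a CAN device
 */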
2740
2741 /*
2742 * Net namespace inlines
2743 */
2744 static inline
2745 struct net *dev_net(const struct net_device *dev)
2746 {
2747 return read_pnet(&dev->nd_net);
2748 }
2749
2750 static inline
2751 struct net *dev_net_rcu(const struct net_device *dev)
2752 {
2753 return read_pnet_rcu(&dev->nd_net);
2754 }
2755
2756 static inline
2757 void dev_net_set(struct net_device *dev, struct net *net)
2758 {
2759 write_pnet(&dev->nd_net, net);
2760 }
2761
2762 /**
2763 * netdev_priv - access network device private data
2764 * @dev: network device
2765 *
2766 * Get network device private data
2767 */
2768 static inline void *netdev_priv(const struct net_device *dev)
2769 {
2770 return (void *)dev->priv;
2771 }
2772
2773 /**
2774 * netdev_from_priv() - get network device from priv
2775 * @priv: network device private data
2776 *
2777 * Returns: net_device to which @priv belongs
2778 */
2779 static inline struct net_device *netdev_from_priv(const void *priv)
2780 {
2781 return container_of(priv, struct net_device, priv);
2782 }
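/*
 * Illustrative sketch only: the private area is co-allocated at the tail of
 * struct net_device, so the two accessors are exact inverses. Assuming a
 * hypothetical Ethernet driver with a struct foo_priv:
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	priv = netdev_priv(dev);
 *	WARN_ON(netdev_from_priv(priv) != dev);
 */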
2783
2784 /* Set the sysfs physical device reference for the network logical device;
2785 * if set prior to registration, a symlink will be created during initialization.
2786 */
2787 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2788
2789 /* Set the sysfs device type for the network logical device to allow
2790 * fine-grained identification of different network device types. For
2791 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2792 */
2793 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2794
2795 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
2796 enum netdev_queue_type type,
2797 struct napi_struct *napi);
2798
2799 static inline void netdev_lock(struct net_device *dev)
2800 {
2801 mutex_lock(&dev->lock);
2802 }
2803
2804 static inline void netdev_unlock(struct net_device *dev)
2805 {
2806 mutex_unlock(&dev->lock);
2807 }
2808 /* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
2809
2810 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
2811
2812 static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
2813 {
2814 netdev_lock(napi->dev);
2815 netif_napi_set_irq_locked(napi, irq);
2816 netdev_unlock(napi->dev);
2817 }
2818
2819 /* Default NAPI poll() weight
2820 * Device drivers are strongly advised not to use a bigger value.
2821 */
2822 #define NAPI_POLL_WEIGHT 64
2823
2824 void netif_napi_add_weight_locked(struct net_device *dev,
2825 struct napi_struct *napi,
2826 int (*poll)(struct napi_struct *, int),
2827 int weight);
2828
2829 static inline void
2830 netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
2831 int (*poll)(struct napi_struct *, int), int weight)
2832 {
2833 netdev_lock(dev);
2834 netif_napi_add_weight_locked(dev, napi, poll, weight);
2835 netdev_unlock(dev);
2836 }
2837
2838 /**
2839 * netif_napi_add() - initialize a NAPI context
2840 * @dev: network device
2841 * @napi: NAPI context
2842 * @poll: polling function
2843 *
2844 * netif_napi_add() must be used to initialize a NAPI context prior to calling
2845 * *any* of the other NAPI-related functions.
2846 */
2847 static inline void
2848 netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2849 int (*poll)(struct napi_struct *, int))
2850 {
2851 netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2852 }
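/*
 * Illustrative sketch only: a hypothetical poll function and its
 * registration. poll() returns the amount of work done and calls
 * napi_complete_done() when it finishes below the budget
 * (foo_clean_rx() is an assumed driver RX-cleanup routine):
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx(napi, budget);
 *
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll);
 */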
2853
2854 static inline void
2855 netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
2856 int (*poll)(struct napi_struct *, int))
2857 {
2858 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2859 }
2860
2861 static inline void
2862 netif_napi_add_tx_weight(struct net_device *dev,
2863 struct napi_struct *napi,
2864 int (*poll)(struct napi_struct *, int),
2865 int weight)
2866 {
2867 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2868 netif_napi_add_weight(dev, napi, poll, weight);
2869 }
2870
2871 static inline void
2872 netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
2873 int (*poll)(struct napi_struct *, int), int index)
2874 {
2875 napi->index = index;
2876 napi->config = &dev->napi_config[index];
2877 netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
2878 }
2879
2880 /**
2881 * netif_napi_add_config - initialize a NAPI context with persistent config
2882 * @dev: network device
2883 * @napi: NAPI context
2884 * @poll: polling function
2885 * @index: the NAPI index
2886 */
2887 static inline void
2888 netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
2889 int (*poll)(struct napi_struct *, int), int index)
2890 {
2891 netdev_lock(dev);
2892 netif_napi_add_config_locked(dev, napi, poll, index);
2893 netdev_unlock(dev);
2894 }
2895
2896 /**
2897 * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
2898 * @dev: network device
2899 * @napi: NAPI context
2900 * @poll: polling function
2901 *
2902 * This variant of netif_napi_add() should be used from drivers using NAPI
2903 * to exclusively poll a TX queue.
2904 * This avoids adding it into napi_hash[], thus not polluting that hash table.
2905 */
2906 static inline void netif_napi_add_tx(struct net_device *dev,
2907 struct napi_struct *napi,
2908 int (*poll)(struct napi_struct *, int))
2909 {
2910 netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
2911 }
2912
2913 void __netif_napi_del_locked(struct napi_struct *napi);
2914
2915 /**
2916 * __netif_napi_del - remove a NAPI context
2917 * @napi: NAPI context
2918 *
2919 * Warning: caller must observe RCU grace period before freeing memory
2920 * containing @napi. Drivers might want to call this helper to combine
2921 * all the needed RCU grace periods into a single one.
2922 */
2923 static inline void __netif_napi_del(struct napi_struct *napi)
2924 {
2925 netdev_lock(napi->dev);
2926 __netif_napi_del_locked(napi);
2927 netdev_unlock(napi->dev);
2928 }
2929
2930 static inline void netif_napi_del_locked(struct napi_struct *napi)
2931 {
2932 __netif_napi_del_locked(napi);
2933 synchronize_net();
2934 }
2935
2936 /**
2937 * netif_napi_del - remove a NAPI context
2938 * @napi: NAPI context
2939 *
2940 * netif_napi_del() removes a NAPI context from the network device NAPI list
2941 */
2942 static inline void netif_napi_del(struct napi_struct *napi)
2943 {
2944 __netif_napi_del(napi);
2945 synchronize_net();
2946 }
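/*
 * Illustrative sketch only: per the __netif_napi_del() note above, a driver
 * tearing down many NAPI contexts can batch them under a single RCU grace
 * period instead of paying one per context:
 *
 *	for (i = 0; i < priv->num_napis; i++)
 *		__netif_napi_del(&priv->napi[i]);
 *	synchronize_net();
 *	// memory containing the napi structs may be freed from here on
 */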
2947
2948 int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
2949 void netif_set_affinity_auto(struct net_device *dev);
2950
2951 struct packet_type {
2952 __be16 type; /* This is really htons(ether_type). */
2953 bool ignore_outgoing;
2954 struct net_device *dev; /* NULL is wildcarded here */
2955 netdevice_tracker dev_tracker;
2956 int (*func) (struct sk_buff *,
2957 struct net_device *,
2958 struct packet_type *,
2959 struct net_device *);
2960 void (*list_func) (struct list_head *,
2961 struct packet_type *,
2962 struct net_device *);
2963 bool (*id_match)(struct packet_type *ptype,
2964 struct sock *sk);
2965 struct net *af_packet_net;
2966 void *af_packet_priv;
2967 struct list_head list;
2968 };
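/*
 * Illustrative sketch only: a protocol module fills in a packet_type and
 * registers it with dev_add_pack() (declared elsewhere in this header).
 * foo_rcv is a hypothetical receive handler; 0x88b5 is the IEEE
 * local-experimental ethertype:
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */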
2969
2970 struct offload_callbacks {
2971 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2972 netdev_features_t features);
2973 struct sk_buff *(*gro_receive)(struct list_head *head,
2974 struct sk_buff *skb);
2975 int (*gro_complete)(struct sk_buff *skb, int nhoff);
2976 };
2977
2978 struct packet_offload {
2979 __be16 type; /* This is really htons(ether_type). */
2980 u16 priority;
2981 struct offload_callbacks callbacks;
2982 struct list_head list;
2983 };
2984
2985 /* often modified stats are per-CPU, others are shared (netdev->stats) */
2986 struct pcpu_sw_netstats {
2987 u64_stats_t rx_packets;
2988 u64_stats_t rx_bytes;
2989 u64_stats_t tx_packets;
2990 u64_stats_t tx_bytes;
2991 struct u64_stats_sync syncp;
2992 } __aligned(4 * sizeof(u64));
2993
2994 struct pcpu_dstats {
2995 u64_stats_t rx_packets;
2996 u64_stats_t rx_bytes;
2997 u64_stats_t tx_packets;
2998 u64_stats_t tx_bytes;
2999 u64_stats_t rx_drops;
3000 u64_stats_t tx_drops;
3001 struct u64_stats_sync syncp;
3002 } __aligned(8 * sizeof(u64));
3003
3004 struct pcpu_lstats {
3005 u64_stats_t packets;
3006 u64_stats_t bytes;
3007 struct u64_stats_sync syncp;
3008 } __aligned(2 * sizeof(u64));
3009
3010 void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
3011
3012 static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
3013 {
3014 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
3015
3016 u64_stats_update_begin(&tstats->syncp);
3017 u64_stats_add(&tstats->rx_bytes, len);
3018 u64_stats_inc(&tstats->rx_packets);
3019 u64_stats_update_end(&tstats->syncp);
3020 }
3021
3022 static inline void dev_sw_netstats_tx_add(struct net_device *dev,
3023 unsigned int packets,
3024 unsigned int len)
3025 {
3026 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
3027
3028 u64_stats_update_begin(&tstats->syncp);
3029 u64_stats_add(&tstats->tx_bytes, len);
3030 u64_stats_add(&tstats->tx_packets, packets);
3031 u64_stats_update_end(&tstats->syncp);
3032 }
3033
3034 static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
3035 {
3036 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
3037
3038 u64_stats_update_begin(&lstats->syncp);
3039 u64_stats_add(&lstats->bytes, len);
3040 u64_stats_inc(&lstats->packets);
3041 u64_stats_update_end(&lstats->syncp);
3042 }
3043
3044 static inline void dev_dstats_rx_add(struct net_device *dev,
3045 unsigned int len)
3046 {
3047 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3048
3049 u64_stats_update_begin(&dstats->syncp);
3050 u64_stats_inc(&dstats->rx_packets);
3051 u64_stats_add(&dstats->rx_bytes, len);
3052 u64_stats_update_end(&dstats->syncp);
3053 }
3054
3055 static inline void dev_dstats_rx_dropped(struct net_device *dev)
3056 {
3057 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3058
3059 u64_stats_update_begin(&dstats->syncp);
3060 u64_stats_inc(&dstats->rx_drops);
3061 u64_stats_update_end(&dstats->syncp);
3062 }
3063
3064 static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
3065 unsigned int packets)
3066 {
3067 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3068
3069 u64_stats_update_begin(&dstats->syncp);
3070 u64_stats_add(&dstats->rx_drops, packets);
3071 u64_stats_update_end(&dstats->syncp);
3072 }
3073
3074 static inline void dev_dstats_tx_add(struct net_device *dev,
3075 unsigned int len)
3076 {
3077 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3078
3079 u64_stats_update_begin(&dstats->syncp);
3080 u64_stats_inc(&dstats->tx_packets);
3081 u64_stats_add(&dstats->tx_bytes, len);
3082 u64_stats_update_end(&dstats->syncp);
3083 }
3084
3085 static inline void dev_dstats_tx_dropped(struct net_device *dev)
3086 {
3087 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
3088
3089 u64_stats_update_begin(&dstats->syncp);
3090 u64_stats_inc(&dstats->tx_drops);
3091 u64_stats_update_end(&dstats->syncp);
3092 }
3093
3094 #define __netdev_alloc_pcpu_stats(type, gfp) \
3095 ({ \
3096 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
3097 if (pcpu_stats) { \
3098 int __cpu; \
3099 for_each_possible_cpu(__cpu) { \
3100 typeof(type) *stat; \
3101 stat = per_cpu_ptr(pcpu_stats, __cpu); \
3102 u64_stats_init(&stat->syncp); \
3103 } \
3104 } \
3105 pcpu_stats; \
3106 })
3107
3108 #define netdev_alloc_pcpu_stats(type) \
3109 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
3110
3111 #define devm_netdev_alloc_pcpu_stats(dev, type) \
3112 ({ \
3113 typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
3114 if (pcpu_stats) { \
3115 int __cpu; \
3116 for_each_possible_cpu(__cpu) { \
3117 typeof(type) *stat; \
3118 stat = per_cpu_ptr(pcpu_stats, __cpu); \
3119 u64_stats_init(&stat->syncp); \
3120 } \
3121 } \
3122 pcpu_stats; \
3123 })
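
/*
 * Example (illustrative sketch, not part of the API above): a hypothetical
 * driver allocating per-CPU tstats and feeding the dev_sw_netstats_*()
 * helpers. foo_init_stats()/foo_rx() are made-up names; newer drivers may
 * instead let the core allocate the stats.
 *
 *	static int foo_init_stats(struct net_device *dev)
 *	{
 *		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *		if (!dev->tstats)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void foo_rx(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		dev_sw_netstats_rx_add(dev, skb->len);
 *		netif_rx(skb);
 *	}
 */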

enum netdev_lag_tx_type {
	NETDEV_LAG_TX_TYPE_UNKNOWN,
	NETDEV_LAG_TX_TYPE_RANDOM,
	NETDEV_LAG_TX_TYPE_BROADCAST,
	NETDEV_LAG_TX_TYPE_ROUNDROBIN,
	NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
	NETDEV_LAG_TX_TYPE_HASH,
};

enum netdev_lag_hash {
	NETDEV_LAG_HASH_NONE,
	NETDEV_LAG_HASH_L2,
	NETDEV_LAG_HASH_L34,
	NETDEV_LAG_HASH_L23,
	NETDEV_LAG_HASH_E23,
	NETDEV_LAG_HASH_E34,
	NETDEV_LAG_HASH_VLAN_SRCMAC,
	NETDEV_LAG_HASH_UNKNOWN,
};

struct netdev_lag_upper_info {
	enum netdev_lag_tx_type tx_type;
	enum netdev_lag_hash hash_type;
};

struct netdev_lag_lower_state_info {
	u8 link_up : 1,
	   tx_enabled : 1;
};

#include <linux/notifier.h>

/* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
 * and the rtnetlink notification exclusion list in rtnetlink_event() when
 * adding new types.
 */
enum netdev_cmd {
	NETDEV_UP	= 1,	/* For now you can't veto a device up/down */
	NETDEV_DOWN,
	NETDEV_REBOOT,		/* Tell a protocol stack a network interface
				   detected a hardware crash and restarted
				   - we can use this e.g. to kick tcp sessions
				   once done */
	NETDEV_CHANGE,		/* Notify device state change */
	NETDEV_REGISTER,
	NETDEV_UNREGISTER,
	NETDEV_CHANGEMTU,	/* notify after mtu change happened */
	NETDEV_CHANGEADDR,	/* notify after the address change */
	NETDEV_PRE_CHANGEADDR,	/* notify before the address change */
	NETDEV_GOING_DOWN,
	NETDEV_CHANGENAME,
	NETDEV_FEAT_CHANGE,
	NETDEV_BONDING_FAILOVER,
	NETDEV_PRE_UP,
	NETDEV_PRE_TYPE_CHANGE,
	NETDEV_POST_TYPE_CHANGE,
	NETDEV_POST_INIT,
	NETDEV_PRE_UNINIT,
	NETDEV_RELEASE,
	NETDEV_NOTIFY_PEERS,
	NETDEV_JOIN,
	NETDEV_CHANGEUPPER,
	NETDEV_RESEND_IGMP,
	NETDEV_PRECHANGEMTU,	/* notify before mtu change happened */
	NETDEV_CHANGEINFODATA,
	NETDEV_BONDING_INFO,
	NETDEV_PRECHANGEUPPER,
	NETDEV_CHANGELOWERSTATE,
	NETDEV_UDP_TUNNEL_PUSH_INFO,
	NETDEV_UDP_TUNNEL_DROP_INFO,
	NETDEV_CHANGE_TX_QUEUE_LEN,
	NETDEV_CVLAN_FILTER_PUSH_INFO,
	NETDEV_CVLAN_FILTER_DROP_INFO,
	NETDEV_SVLAN_FILTER_PUSH_INFO,
	NETDEV_SVLAN_FILTER_DROP_INFO,
	NETDEV_OFFLOAD_XSTATS_ENABLE,
	NETDEV_OFFLOAD_XSTATS_DISABLE,
	NETDEV_OFFLOAD_XSTATS_REPORT_USED,
	NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
	NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);

int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
int unregister_netdevice_notifier_net(struct net *net,
				      struct notifier_block *nb);
int register_netdevice_notifier_dev_net(struct net_device *dev,
					struct notifier_block *nb,
					struct netdev_net_notifier *nn);
int unregister_netdevice_notifier_dev_net(struct net_device *dev,
					  struct notifier_block *nb,
					  struct netdev_net_notifier *nn);

struct netdev_notifier_info {
	struct net_device	*dev;
	struct netlink_ext_ack	*extack;
};

struct netdev_notifier_info_ext {
	struct netdev_notifier_info info; /* must be first */
	union {
		u32 mtu;
	} ext;
};

struct netdev_notifier_change_info {
	struct netdev_notifier_info info; /* must be first */
	unsigned int flags_changed;
};

struct netdev_notifier_changeupper_info {
	struct netdev_notifier_info info; /* must be first */
	struct net_device *upper_dev; /* new upper dev */
	bool master; /* is upper dev master */
	bool linking; /* is the notification for link or unlink */
	void *upper_info; /* upper dev info */
};

struct netdev_notifier_changelowerstate_info {
	struct netdev_notifier_info info; /* must be first */
	void *lower_state_info; /* lower dev state */
};

struct netdev_notifier_pre_changeaddr_info {
	struct netdev_notifier_info info; /* must be first */
	const unsigned char *dev_addr;
};

enum netdev_offload_xstats_type {
	NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
};

struct netdev_notifier_offload_xstats_info {
	struct netdev_notifier_info info; /* must be first */
	enum netdev_offload_xstats_type type;

	union {
		/* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
		struct netdev_notifier_offload_xstats_rd *report_delta;
		/* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
		struct netdev_notifier_offload_xstats_ru *report_used;
	};
};

int netdev_offload_xstats_enable(struct net_device *dev,
				 enum netdev_offload_xstats_type type,
				 struct netlink_ext_ack *extack);
int netdev_offload_xstats_disable(struct net_device *dev,
				  enum netdev_offload_xstats_type type);
bool netdev_offload_xstats_enabled(const struct net_device *dev,
				   enum netdev_offload_xstats_type type);
int netdev_offload_xstats_get(struct net_device *dev,
			      enum netdev_offload_xstats_type type,
			      struct rtnl_hw_stats64 *stats, bool *used,
			      struct netlink_ext_ack *extack);
void
netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
				   const struct rtnl_hw_stats64 *stats);
void
netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
void netdev_offload_xstats_push_delta(struct net_device *dev,
				      enum netdev_offload_xstats_type type,
				      const struct rtnl_hw_stats64 *stats);

static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
					     struct net_device *dev)
{
	info->dev = dev;
	info->extack = NULL;
}

static inline struct net_device *
netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
{
	return info->dev;
}

static inline struct netlink_ext_ack *
netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
{
	return info->extack;
}
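
/*
 * Example (illustrative sketch): registering a netdevice notifier and
 * reacting to NETDEV_UP. foo_netdev_event()/foo_nb are hypothetical names.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&foo_nb);
 */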

int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
int call_netdevice_notifiers_info(unsigned long val,
				  struct netdev_notifier_info *info);

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_reverse(net, d)		\
		list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
						     dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave)	\
		for_each_netdev_rcu(dev_net_rcu(bond), slave)	\
			if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

#define for_each_netdev_dump(net, d, ifindex)				\
	for (; (d = xa_find(&(net)->dev_by_index, &ifindex,		\
			    ULONG_MAX, XA_PRESENT)); ifindex++)
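
/*
 * Example (illustrative sketch): walking every device in a namespace under
 * RCU protection; the printed format is arbitrary.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("found %s (ifindex %d)\n", dev->name, dev->ifindex);
 *	rcu_read_unlock();
 */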

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

int netdev_boot_setup_check(struct net_device *dev);
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
				   const char *hwaddr);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
void dev_add_pack(struct packet_type *pt);
void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);

int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
			  struct net_device_path_stack *stack);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void netif_close(struct net_device *dev);
void dev_close(struct net_device *dev);
void netif_close_many(struct list_head *head, bool unlink);
void netif_disable_lro(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
		     struct net_device *sb_dev);

int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);

static inline int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}

static inline int dev_queue_xmit_accel(struct sk_buff *skb,
				       struct net_device *sb_dev)
{
	return __dev_queue_xmit(skb, sb_dev);
}

static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
	int ret;

	ret = __dev_direct_xmit(skb, queue_id);
	if (!dev_xmit_complete(ret))
		kfree_skb(skb);
	return ret;
}

int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
bool unregister_netdevice_queued(const struct net_device *dev);

static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}

int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);

struct net_device *netdev_get_xmit_slave(struct net_device *dev,
					 struct sk_buff *skb,
					 bool all_slaves);
struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
					    struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
				       netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
				      netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
					   unsigned short flags, unsigned short mask);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
void netdev_copy_name(struct net_device *dev, char *name);

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned int len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, dev, haddr);
}

static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse_protocol)
		return 0;
	return dev->header_ops->parse_protocol(skb);
}

/* ll_header must have at least hard_header_len allocated */
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;
	if (len < dev->min_header_len)
		return false;

	if (capable(CAP_SYS_RAWIO)) {
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}

static inline bool dev_has_header(const struct net_device *dev)
{
	return dev->header_ops && dev->header_ops->create;
}
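
/*
 * Example (illustrative sketch): prepending a link-layer header to a
 * locally built frame before handing it to the transmit path. dst/src are
 * placeholder addresses; the skb is assumed to have enough headroom.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, dst, src, skb->len) < 0)
 *		goto drop;
 *	dev_queue_xmit(skb);
 */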

struct numa_drop_counters {
	atomic_t	drops0 ____cacheline_aligned_in_smp;
	atomic_t	drops1 ____cacheline_aligned_in_smp;
};

static inline int numa_drop_read(const struct numa_drop_counters *ndc)
{
	return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
}

static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
{
	int n = numa_node_id() % 2;

	if (n)
		atomic_add(val, &ndc->drops1);
	else
		atomic_add(val, &ndc->drops0);
}

static inline void numa_drop_reset(struct numa_drop_counters *ndc)
{
	atomic_set(&ndc->drops0, 0);
	atomic_set(&ndc->drops1, 0);
}

/*
 * Incoming packets are placed on per-CPU queues
 */
struct softnet_data {
	struct list_head	poll_list;
	struct sk_buff_head	process_queue;
	local_lock_t		process_queue_bh_lock;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
#endif

	unsigned int		received_rps;
	bool			in_net_rx_action;
	bool			in_napi_threaded_poll;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit __rcu *flow_limit;
#endif
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct sk_buff		*completion_queue;
#ifdef CONFIG_XFRM_OFFLOAD
	struct sk_buff_head	xfrm_backlog;
#endif
	/* written and read only by owning cpu: */
	struct netdev_xmit	xmit;
#ifdef CONFIG_RPS
	/* input_queue_head should be written by cpu owning this struct,
	 * and only read by other cpus. Worth using a cache line.
	 */
	unsigned int		input_queue_head ____cacheline_aligned_in_smp;

	/* Elements below can be accessed between CPUs for RPS/RFS */
	call_single_data_t	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;

	/* We force a cacheline alignment from here, to hold together
	 * input_queue_tail, input_pkt_queue and backlog.state.
	 * We add holes so that backlog.state is the last field
	 * of this cache line.
	 */
	long			pad[3] ____cacheline_aligned_in_smp;
	unsigned int		input_queue_tail;
#endif
	struct sk_buff_head	input_pkt_queue;

	struct napi_struct	backlog;

	struct numa_drop_counters drop_counters;

	int			defer_ipi_scheduled ____cacheline_aligned_in_smp;
	call_single_data_t	defer_csd;
};

DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

struct page_pool_bh {
	struct page_pool *pool;
	local_lock_t bh_lock;
};
DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);

#define XMIT_RECURSION_LIMIT	8

#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
	return this_cpu_read(softnet_data.xmit.recursion);
}

static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline int dev_recursion_level(void)
{
	return current->net_xmit.recursion;
}

static inline bool dev_xmit_recursion(void)
{
	return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	current->net_xmit.recursion++;
}

static inline void dev_xmit_recursion_dec(void)
{
	current->net_xmit.recursion--;
}
#endif
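
/*
 * Example (illustrative sketch): a stacked/virtual device that re-enters
 * the transmit path can bound the recursion with the helpers above. The
 * surrounding logic is hypothetical; the core uses the same pattern in
 * dev_queue_xmit().
 *
 *	if (dev_xmit_recursion()) {
 *		kfree_skb(skb);
 *		return NET_XMIT_DROP;
 *	}
 *	dev_xmit_recursion_inc();
 *	ret = dev_queue_xmit(skb);
 *	dev_xmit_recursion_dec();
 */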

void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

void netif_tx_wake_queue(struct netdev_queue *dev_queue);

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	/* Paired with READ_ONCE() from dev_watchdog() */
	WRITE_ONCE(dev_queue->trans_start, jiffies);

	/* This barrier is paired with smp_mb() from dev_watchdog() */
	smp_mb__before_atomic();

	/* Must be an atomic op see netif_txq_try_stop() */
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop transmit
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

void netif_tx_stop_all_queues(struct net_device *dev);

static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}

/**
 * netif_queue_stopped - test if transmit queue is flowblocked
 * @dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 */
static inline bool netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}

static inline bool
netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

static inline bool
netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
}

/**
 * netdev_queue_set_dql_min_limit - set dql minimum limit
 * @dev_queue: pointer to transmit queue
 * @min_limit: dql minimum limit
 *
 * Forces xmit_more() to return true until the minimum threshold
 * defined by @min_limit is reached (or until the tx queue is
 * empty). Warning: to be used with care; misuse will impact
 * latency.
 */
static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue,
						  unsigned int min_limit)
{
#ifdef CONFIG_BQL
	dev_queue->dql.min_limit = min_limit;
#endif
}

static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
	/* Non-BQL migrated drivers will return 0, too. */
	return dql_avail(&txq->dql);
#else
	return 0;
#endif
}

/**
 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.num_queued);
#endif
}

/**
 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
 * @dev_queue: pointer to transmit queue
 *
 * BQL enabled drivers might use this helper in their TX completion path,
 * to give appropriate hint to the CPU.
 */
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_BQL
	prefetchw(&dev_queue->dql.limit);
#endif
}

/**
 * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
 * @dev_queue: network device queue
 * @bytes: number of bytes queued to the device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue. @bytes should be a good approximation and should
 * exactly match netdev_completed_queue() @bytes.
 * This is typically called once per packet, from ndo_start_xmit().
 */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					unsigned int bytes)
{
#ifdef CONFIG_BQL
	dql_queued(&dev_queue->dql, bytes);

	if (likely(dql_avail(&dev_queue->dql) >= 0))
		return;

	/* Paired with READ_ONCE() from dev_watchdog() */
	WRITE_ONCE(dev_queue->trans_start, jiffies);

	/* This barrier is paired with smp_mb() from dev_watchdog() */
	smp_mb__before_atomic();

	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);

	/*
	 * The XOFF flag must be set before checking the dql_avail below,
	 * because in netdev_tx_completed_queue we update the dql_completed
	 * before checking the XOFF flag.
	 */
	smp_mb__after_atomic();

	/* check again in case another CPU has just made room avail */
	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}

/* Variant of netdev_tx_sent_queue() for drivers that are aware
 * that they should not test BQL status themselves.
 * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
 * skb of a batch.
 * Returns true if the doorbell must be used to kick the NIC.
 */
static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
					  unsigned int bytes,
					  bool xmit_more)
{
	if (xmit_more) {
#ifdef CONFIG_BQL
		dql_queued(&dev_queue->dql, bytes);
#endif
		return netif_tx_queue_stopped(dev_queue);
	}
	netdev_tx_sent_queue(dev_queue, bytes);
	return true;
}

/**
 * netdev_sent_queue - report the number of bytes queued to hardware
 * @dev: network device
 * @bytes: number of bytes queued to the hardware device queue
 *
 * Report the number of bytes queued for sending/completion to the network
 * device hardware queue #0. @bytes should be a good approximation and should
 * exactly match netdev_completed_queue() @bytes.
 * This is typically called once per packet, from ndo_start_xmit().
 */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}

static inline bool __netdev_sent_queue(struct net_device *dev,
				       unsigned int bytes,
				       bool xmit_more)
{
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
				      xmit_more);
}

/**
 * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
 * @dev_queue: network device queue
 * @pkts: number of packets (currently ignored)
 * @bytes: number of bytes dequeued from the device queue
 *
 * Must be called at most once per TX completion round (and not per
 * individual packet), so that BQL can adjust its limits appropriately.
 */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
					     unsigned int pkts, unsigned int bytes)
{
#ifdef CONFIG_BQL
	if (unlikely(!bytes))
		return;

	dql_completed(&dev_queue->dql, bytes);

	/*
	 * Without the memory barrier there is a small possibility that
	 * netdev_tx_sent_queue will miss the update and cause the queue to
	 * be stopped forever
	 */
	smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */

	if (unlikely(dql_avail(&dev_queue->dql) < 0))
		return;

	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
#endif
}
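
/*
 * Example (illustrative sketch): the usual BQL pairing in a hypothetical
 * driver - bytes reported from ndo_start_xmit() must later be completed,
 * once per completion round, from the TX interrupt/NAPI path.
 *
 *	// in foo_start_xmit(), after posting the descriptor:
 *	netdev_tx_sent_queue(txq, skb->len);
 *
 *	// in the TX completion handler, after reclaiming descriptors:
 *	netdev_tx_completed_queue(txq, done_pkts, done_bytes);
 */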

/**
 * netdev_completed_queue - report bytes and packets completed by device
 * @dev: network device
 * @pkts: actual number of packets sent over the medium
 * @bytes: actual number of bytes sent over the medium
 *
 * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium. @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
 */
static inline void netdev_completed_queue(struct net_device *dev,
					  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
#endif
}

/**
 * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
 * @dev: network device
 * @qid: stack index of the queue to reset
 */
static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
					    u32 qid)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
}

/**
 * netdev_reset_queue - reset the packets and bytes count of a network device
 * @dev_queue: network device
 *
 * Reset the bytes and packet count of a network device and clear the
 * software flow control OFF bit for this network device
 */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
	netdev_tx_reset_subqueue(dev_queue, 0);
}

/**
 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
 * @dev: network device
 * @queue_index: given tx queue index
 *
 * Returns 0 if given tx queue index >= number of device tx queues,
 * otherwise returns the originally passed tx queue index.
 */
static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}

	return queue_index;
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up.
 */
static inline bool netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device. We only need start,
 * stop, and a check if it's stopped. All other device management is
 * done at the overall netdevice level.
 * There is also a test for whether the device is multiqueue.
 */

/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
}

/**
 * __netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/**
 * netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @skb: sub queue buffer pointer
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
}
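
/*
 * Example (illustrative sketch): per-ring flow control in a hypothetical
 * multiqueue driver - stop a subqueue when its ring fills, wake it from
 * the completion path once descriptors are reclaimed. foo_ring_full()/
 * foo_ring_has_room() are made-up helpers.
 *
 *	// in foo_start_xmit():
 *	if (foo_ring_full(ring))
 *		netif_stop_subqueue(dev, ring->index);
 *
 *	// in the TX completion handler:
 *	if (foo_ring_has_room(ring) &&
 *	    __netif_subqueue_stopped(dev, ring->index))
 *		netif_wake_subqueue(dev, ring->index);
 */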

#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index);
int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
			  u16 index, enum xps_map_type type);

/**
 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
 * @j: CPU/Rx queue index
 * @mask: bitmask of all cpus/rx queues
 * @nr_bits: number of bits in the bitmask
 *
 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
 */
static inline bool netif_attr_test_mask(unsigned long j,
					const unsigned long *mask,
					unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);
	return test_bit(j, mask);
}

/**
 * netif_attr_test_online - Test for online CPU/Rx queue
 * @j: CPU/Rx queue index
 * @online_mask: bitmask for CPUs/Rx queues that are online
 * @nr_bits: number of bits in the bitmask
 *
 * Returns: true if a CPU/Rx queue is online.
 */
static inline bool netif_attr_test_online(unsigned long j,
					  const unsigned long *online_mask,
					  unsigned int nr_bits)
{
	cpu_max_bits_warn(j, nr_bits);

	if (online_mask)
		return test_bit(j, online_mask);

	return (j < nr_bits);
}

/**
 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
 * @n: CPU/Rx queue index
 * @srcp: the cpumask/Rx queue mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns: next (after n) CPU/Rx queue index in the mask;
 * >= nr_bits if no further CPUs/Rx queues set.
 */
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
					       unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (srcp)
		return find_next_bit(srcp, nr_bits, n + 1);

	return n + 1;
}

/**
 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
 * @n: CPU/Rx queue index
 * @src1p: the first CPUs/Rx queues mask pointer
 * @src2p: the second CPUs/Rx queues mask pointer
 * @nr_bits: number of bits in the bitmask
 *
 * Returns: next (after n) CPU/Rx queue index set in both masks;
 * >= nr_bits if no further CPUs/Rx queues set in both.
 */
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
					  const unsigned long *src2p,
					  unsigned int nr_bits)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpu_max_bits_warn(n, nr_bits);

	if (src1p && src2p)
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);

	return n + 1;
}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
				      const struct cpumask *mask,
				      u16 index)
{
	return 0;
}

static inline int __netif_set_xps_queue(struct net_device *dev,
					const unsigned long *mask,
					u16 index, enum xps_map_type type)
{
	return 0;
}
#endif

/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline bool netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}

int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
int netif_set_real_num_queues(struct net_device *dev,
			      unsigned int txq, unsigned int rxq);

int netif_get_num_default_rss_queues(void);

void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);

/*
 * It is not allowed to call kfree_skb() or consume_skb() from hardware
 * interrupt context or with hardware interrupts being disabled.
 * (in_hardirq() || irqs_disabled())
 *
 * We provide four helpers that can be used in the following contexts:
 *
 * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
 * Typically used in place of consume_skb(skb) in TX completion path
 *
 * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
 * replacing kfree_skb(skb)
 *
 * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
 * and consumed a packet. Used in place of consume_skb(skb)
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}

static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}

static inline void dev_consume_skb_any(struct sk_buff *skb)
{
	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
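
/*
 * Example (illustrative sketch): freeing skbs from a TX completion handler
 * that may run in hard irq context. Successfully sent packets are
 * "consumed"; packets dropped on error are "freed" so drop monitoring can
 * still see them. tx_status_ok is a placeholder condition.
 *
 *	if (tx_status_ok)
 *		dev_consume_skb_irq(skb);
 *	else
 *		dev_kfree_skb_irq(skb);
 */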

u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
			     const struct bpf_prog *xdp_prog);
void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);

int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);

static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
					    struct sk_buff *skb)
{
	return gro_receive_skb(&napi->gro, skb);
}

struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);

static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}

bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data);
void netdev_rx_handler_unregister(struct net_device *dev);

bool dev_valid_name(const char *name);
static inline bool is_socket_ioctl_cmd(unsigned int cmd)
{
	return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
}
int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
	      void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
int dev_eth_ioctl(struct net_device *dev,
		  struct ifreq *ifr, unsigned int cmd);
int generic_hwtstamp_get_lower(struct net_device *dev,
			       struct kernel_hwtstamp_config *kernel_cfg);
int generic_hwtstamp_set_lower(struct net_device *dev,
			       struct kernel_hwtstamp_config *kernel_cfg,
			       struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int netif_get_flags(const struct net_device *dev);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack);
int netif_change_flags(struct net_device *dev, unsigned int flags,
		       struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
		     struct netlink_ext_ack *extack);
int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
			       const char *pat, int new_ifindex,
			       struct netlink_ext_ack *extack);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
			     const char *pat);
int __netif_set_mtu(struct net_device *dev, int new_mtu);
int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
				struct netlink_ext_ack *extack);
int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
			  struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
			struct netlink_ext_ack *extack);
int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
			     struct netlink_ext_ack *extack);
int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int netif_get_port_parent_id(struct net_device *dev,
			     struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
				    struct netdev_queue *txq, int *ret);

int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
u8 dev_xdp_prog_count(struct net_device *dev);
int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
u8 dev_xdp_sb_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);

u32 dev_get_min_mp_channel_count(const struct net_device *dev);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
			const struct sk_buff *skb);

static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
						 const struct sk_buff *skb,
						 const bool check_mtu)
{
	const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	if (!check_mtu)
		return true;

	len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

void netdev_core_stats_inc(struct net_device *dev, u32 offset);

#define DEV_CORE_STATS_INC(FIELD)						\
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev)	\
{										\
	netdev_core_stats_inc(dev,						\
			      offsetof(struct net_device_core_stats, FIELD));	\
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)
DEV_CORE_STATS_INC(rx_otherhost_dropped)
#undef DEV_CORE_STATS_INC

static __always_inline int ____dev_forward_skb(struct net_device *dev,
					       struct sk_buff *skb,
					       const bool check_mtu)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
	skb->priority = 0;
	return 0;
}

bool dev_nit_active_rcu(const struct net_device *dev);
static inline bool dev_nit_active(const struct net_device *dev)
{
	bool ret;

	rcu_read_lock();
	ret = dev_nit_active_rcu(dev);
	rcu_read_unlock();
	return ret;
}

void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);

static inline void __dev_put(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_dec(*dev->pcpu_refcnt);
#else
		refcount_dec(&dev->dev_refcnt);
#endif
	}
}

static inline void __dev_hold(struct net_device *dev)
{
	if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
		this_cpu_inc(*dev->pcpu_refcnt);
#else
		refcount_inc(&dev->dev_refcnt);
#endif
	}
}

static inline void __netdev_tracker_alloc(struct net_device *dev,
					  netdevice_tracker *tracker,
					  gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
#endif
}

/* netdev_tracker_alloc() can upgrade a prior untracked reference
 * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
 */
static inline void netdev_tracker_alloc(struct net_device *dev,
					netdevice_tracker *tracker, gfp_t gfp)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	refcount_dec(&dev->refcnt_tracker.no_tracker);
	__netdev_tracker_alloc(dev, tracker, gfp);
#endif
}

static inline void netdev_tracker_free(struct net_device *dev,
				       netdevice_tracker *tracker)
{
#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
	ref_tracker_free(&dev->refcnt_tracker, tracker);
#endif
}

static inline void netdev_hold(struct net_device *dev,
			       netdevice_tracker *tracker, gfp_t gfp)
{
	if (dev) {
		__dev_hold(dev);
		__netdev_tracker_alloc(dev, tracker, gfp);
	}
}

static inline void netdev_put(struct net_device *dev,
			      netdevice_tracker *tracker)
{
	if (dev) {
		netdev_tracker_free(dev, tracker);
		__dev_put(dev);
	}
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 * Try using netdev_hold() instead.
 */
static inline void dev_hold(struct net_device *dev)
{
	netdev_hold(dev, NULL, GFP_ATOMIC);
}

/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 * Try using netdev_put() instead.
 */
static inline void dev_put(struct net_device *dev)
{
	netdev_put(dev, NULL);
}
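
/*
 * Example (illustrative sketch): taking a tracked reference on a device
 * for the lifetime of some object. foo->dev_tracker is a hypothetical
 * netdevice_tracker stored next to the device pointer, so leaks can be
 * attributed when CONFIG_NET_DEV_REFCNT_TRACKER is enabled.
 *
 *	netdev_hold(dev, &foo->dev_tracker, GFP_KERNEL);
 *	foo->dev = dev;
 *	...
 *	netdev_put(foo->dev, &foo->dev_tracker);
 */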
4519
DEFINE_FREE(dev_put,struct net_device *,if (_T)dev_put (_T))4520 DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
4521
4522 static inline void netdev_ref_replace(struct net_device *odev,
4523 struct net_device *ndev,
4524 netdevice_tracker *tracker,
4525 gfp_t gfp)
4526 {
4527 if (odev)
4528 netdev_tracker_free(odev, tracker);
4529
4530 __dev_hold(ndev);
4531 __dev_put(odev);
4532
4533 if (ndev)
4534 __netdev_tracker_alloc(ndev, tracker, gfp);
4535 }
4536
4537 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4538 * and _off may be called from IRQ context, but it is caller
4539 * who is responsible for serialization of these calls.
4540 *
4541 * The name carrier is inappropriate, these functions should really be
4542 * called netif_lowerlayer_*() because they represent the state of any
4543 * kind of lower layer not just hardware media.
4544 */
4545 void linkwatch_fire_event(struct net_device *dev);
4546
4547 /**
4548 * linkwatch_sync_dev - sync linkwatch for the given device
4549 * @dev: network device to sync linkwatch for
4550 *
4551 * Sync linkwatch for the given device, removing it from the
4552 * pending work list (if queued).
4553 */
4554 void linkwatch_sync_dev(struct net_device *dev);
4555 void __linkwatch_sync_dev(struct net_device *dev);
4556
4557 /**
4558 * netif_carrier_ok - test if carrier present
4559 * @dev: network device
4560 *
4561 * Check if carrier is present on device
4562 */
netif_carrier_ok(const struct net_device * dev)4563 static inline bool netif_carrier_ok(const struct net_device *dev)
4564 {
4565 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4566 }
4567
4568 unsigned long dev_trans_start(struct net_device *dev);
4569
4570 void netdev_watchdog_up(struct net_device *dev);
4571
4572 void netif_carrier_on(struct net_device *dev);
4573 void netif_carrier_off(struct net_device *dev);
4574 void netif_carrier_event(struct net_device *dev);
4575
4576 /**
4577 * netif_dormant_on - mark device as dormant.
4578 * @dev: network device
4579 *
4580 * Mark device as dormant (as per RFC2863).
4581 *
4582 * The dormant state indicates that the relevant interface is not
4583 * actually in a condition to pass packets (i.e., it is not 'up') but is
4584 * in a "pending" state, waiting for some external event. For "on-
4585 * demand" interfaces, this new state identifies the situation where the
4586 * interface is waiting for events to place it in the up state.
4587 */
netif_dormant_on(struct net_device * dev)4588 static inline void netif_dormant_on(struct net_device *dev)
4589 {
4590 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4591 linkwatch_fire_event(dev);
4592 }
4593
4594 /**
4595 * netif_dormant_off - set device as not dormant.
4596 * @dev: network device
4597 *
4598 * Device is not in dormant state.
4599 */
netif_dormant_off(struct net_device * dev)4600 static inline void netif_dormant_off(struct net_device *dev)
4601 {
4602 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4603 linkwatch_fire_event(dev);
4604 }
4605
4606 /**
4607 * netif_dormant - test if device is dormant
4608 * @dev: network device
4609 *
4610 * Check if device is dormant.
4611 */
netif_dormant(const struct net_device * dev)4612 static inline bool netif_dormant(const struct net_device *dev)
4613 {
4614 return test_bit(__LINK_STATE_DORMANT, &dev->state);
4615 }
4616
4617
4618 /**
4619 * netif_testing_on - mark device as under test.
4620 * @dev: network device
4621 *
4622 * Mark device as under test (as per RFC2863).
4623 *
4624 * The testing state indicates that some test(s) must be performed on
4625 * the interface. After completion, of the test, the interface state
4626 * will change to up, dormant, or down, as appropriate.
4627 */
netif_testing_on(struct net_device * dev)4628 static inline void netif_testing_on(struct net_device *dev)
4629 {
4630 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4631 linkwatch_fire_event(dev);
4632 }
4633
4634 /**
4635 * netif_testing_off - set device as not under test.
4636 * @dev: network device
4637 *
4638 * Device is not in testing state.
4639 */
netif_testing_off(struct net_device * dev)4640 static inline void netif_testing_off(struct net_device *dev)
4641 {
4642 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4643 linkwatch_fire_event(dev);
4644 }
4645
4646 /**
4647 * netif_testing - test if device is under test
4648 * @dev: network device
4649 *
4650 * Check if device is under test
4651 */
4652 static inline bool netif_testing(const struct net_device *dev)
4653 {
4654 return test_bit(__LINK_STATE_TESTING, &dev->state);
4655 }
4656
4657
4658 /**
4659 * netif_oper_up - test if device is operational
4660 * @dev: network device
4661 *
4662 * Check if the device's RFC2863 operational state is up.
4663 */
4664 static inline bool netif_oper_up(const struct net_device *dev)
4665 {
4666 unsigned int operstate = READ_ONCE(dev->operstate);
4667
4668 return operstate == IF_OPER_UP ||
4669 operstate == IF_OPER_UNKNOWN /* backward compat */;
4670 }
4671
4672 /**
4673 * netif_device_present - is device available or removed
4674 * @dev: network device
4675 *
4676 * Check if the device has not been removed from the system.
4677 */
4678 static inline bool netif_device_present(const struct net_device *dev)
4679 {
4680 return test_bit(__LINK_STATE_PRESENT, &dev->state);
4681 }
4682
4683 void netif_device_detach(struct net_device *dev);
4684
4685 void netif_device_attach(struct net_device *dev);
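
/*
 * A minimal sketch of the usual suspend/resume pairing, assuming
 * hypothetical PM callbacks and a hypothetical my_hw_power_*() pair:
 *
 *	netif_device_detach(dev);	// mark absent, pause TX watchdog
 *	my_hw_power_down(dev);		// hypothetical hardware poweroff
 *	...
 *	my_hw_power_up(dev);		// hypothetical hardware powerup
 *	netif_device_attach(dev);	// mark present, wake queues
 */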
4686
4687 /*
4688 * Network interface message level settings
4689 */
4690
4691 enum {
4692 NETIF_MSG_DRV_BIT,
4693 NETIF_MSG_PROBE_BIT,
4694 NETIF_MSG_LINK_BIT,
4695 NETIF_MSG_TIMER_BIT,
4696 NETIF_MSG_IFDOWN_BIT,
4697 NETIF_MSG_IFUP_BIT,
4698 NETIF_MSG_RX_ERR_BIT,
4699 NETIF_MSG_TX_ERR_BIT,
4700 NETIF_MSG_TX_QUEUED_BIT,
4701 NETIF_MSG_INTR_BIT,
4702 NETIF_MSG_TX_DONE_BIT,
4703 NETIF_MSG_RX_STATUS_BIT,
4704 NETIF_MSG_PKTDATA_BIT,
4705 NETIF_MSG_HW_BIT,
4706 NETIF_MSG_WOL_BIT,
4707
4708 /* When you add a new bit above, update netif_msg_class_names array
4709 * in net/ethtool/common.c
4710 */
4711 NETIF_MSG_CLASS_COUNT,
4712 };
4713 /* Both ethtool_ops interface and internal driver implementation use u32 */
4714 static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4715
4716 #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4717 #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4718
4719 #define NETIF_MSG_DRV __NETIF_MSG(DRV)
4720 #define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4721 #define NETIF_MSG_LINK __NETIF_MSG(LINK)
4722 #define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4723 #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4724 #define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4725 #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4726 #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4727 #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4728 #define NETIF_MSG_INTR __NETIF_MSG(INTR)
4729 #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4730 #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4731 #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4732 #define NETIF_MSG_HW __NETIF_MSG(HW)
4733 #define NETIF_MSG_WOL __NETIF_MSG(WOL)
4734
4735 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4736 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4737 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4738 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4739 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4740 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4741 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4742 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4743 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4744 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4745 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4746 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4747 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4748 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4749 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4750
4751 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4752 {
4753 /* use default */
4754 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4755 return default_msg_enable_bits;
4756 if (debug_value == 0) /* no output */
4757 return 0;
4758 /* set low N bits */
4759 return (1U << debug_value) - 1;
4760 }
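
/*
 * A minimal probe-time sketch: "debug" stands in for a driver module
 * parameter and the default class mask is illustrative:
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
 *					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *	if (netif_msg_probe(priv))
 *		netdev_info(dev, "probing device\n");
 */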
4761
4762 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4763 {
4764 spin_lock(&txq->_xmit_lock);
4765 /* Pairs with READ_ONCE() in netif_tx_owned() */
4766 WRITE_ONCE(txq->xmit_lock_owner, cpu);
4767 }
4768
4769 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4770 {
4771 __acquire(&txq->_xmit_lock);
4772 return true;
4773 }
4774
4775 static inline void __netif_tx_release(struct netdev_queue *txq)
4776 {
4777 __release(&txq->_xmit_lock);
4778 }
4779
4780 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4781 {
4782 spin_lock_bh(&txq->_xmit_lock);
4783 /* Pairs with READ_ONCE() in netif_tx_owned() */
4784 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4785 }
4786
4787 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4788 {
4789 bool ok = spin_trylock(&txq->_xmit_lock);
4790
4791 if (likely(ok)) {
4792 /* Pairs with READ_ONCE() in netif_tx_owned() */
4793 WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4794 }
4795 return ok;
4796 }
4797
4798 static inline void __netif_tx_unlock(struct netdev_queue *txq)
4799 {
4800 /* Pairs with READ_ONCE() in netif_tx_owned() */
4801 WRITE_ONCE(txq->xmit_lock_owner, -1);
4802 spin_unlock(&txq->_xmit_lock);
4803 }
4804
4805 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4806 {
4807 /* Pairs with READ_ONCE() in netif_tx_owned() */
4808 WRITE_ONCE(txq->xmit_lock_owner, -1);
4809 spin_unlock_bh(&txq->_xmit_lock);
4810 }
4811
4812 /*
4813 * txq->trans_start can be read locklessly from dev_watchdog()
4814 */
4815 static inline void txq_trans_update(const struct net_device *dev,
4816 struct netdev_queue *txq)
4817 {
4818 if (!dev->lltx)
4819 WRITE_ONCE(txq->trans_start, jiffies);
4820 }
4821
4822 static inline void txq_trans_cond_update(struct netdev_queue *txq)
4823 {
4824 unsigned long now = jiffies;
4825
4826 if (READ_ONCE(txq->trans_start) != now)
4827 WRITE_ONCE(txq->trans_start, now);
4828 }
4829
4830 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4831 static inline void netif_trans_update(struct net_device *dev)
4832 {
4833 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4834
4835 txq_trans_cond_update(txq);
4836 }
4837
4838 /**
4839 * netif_tx_lock - grab network device transmit lock
4840 * @dev: network device
4841 *
4842 * Get network device transmit lock
4843 */
4844 void netif_tx_lock(struct net_device *dev);
4845
4846 static inline void netif_tx_lock_bh(struct net_device *dev)
4847 {
4848 local_bh_disable();
4849 netif_tx_lock(dev);
4850 }
4851
4852 void netif_tx_unlock(struct net_device *dev);
4853
4854 static inline void netif_tx_unlock_bh(struct net_device *dev)
4855 {
4856 netif_tx_unlock(dev);
4857 local_bh_enable();
4858 }
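
/*
 * A minimal sketch: freezing ndo_start_xmit() while reconfiguring
 * transmit state (my_rewrite_tx_rings() is a hypothetical helper):
 *
 *	netif_tx_lock_bh(dev);
 *	my_rewrite_tx_rings(dev);	// no xmit can run concurrently
 *	netif_tx_unlock_bh(dev);
 */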
4859
4860 #define HARD_TX_LOCK(dev, txq, cpu) { \
4861 if (!(dev)->lltx) { \
4862 __netif_tx_lock(txq, cpu); \
4863 } else { \
4864 __netif_tx_acquire(txq); \
4865 } \
4866 }
4867
4868 #define HARD_TX_TRYLOCK(dev, txq) \
4869 (!(dev)->lltx ? \
4870 __netif_tx_trylock(txq) : \
4871 __netif_tx_acquire(txq))
4872
4873 #define HARD_TX_UNLOCK(dev, txq) { \
4874 if (!(dev)->lltx) { \
4875 __netif_tx_unlock(txq); \
4876 } else { \
4877 __netif_tx_release(txq); \
4878 } \
4879 }
4880
4881 static inline void netif_tx_disable(struct net_device *dev)
4882 {
4883 unsigned int i;
4884 int cpu;
4885
4886 local_bh_disable();
4887 cpu = smp_processor_id();
4888 spin_lock(&dev->tx_global_lock);
4889 for (i = 0; i < dev->num_tx_queues; i++) {
4890 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4891
4892 __netif_tx_lock(txq, cpu);
4893 netif_tx_stop_queue(txq);
4894 __netif_tx_unlock(txq);
4895 }
4896 spin_unlock(&dev->tx_global_lock);
4897 local_bh_enable();
4898 }
4899
4900 #ifndef CONFIG_PREEMPT_RT
4901 static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4902 {
4903 /* Other cpus might concurrently change txq->xmit_lock_owner
4904 * to -1 or to their cpu id, but not to our id.
4905 */
4906 return READ_ONCE(txq->xmit_lock_owner) == cpu;
4907 }
4908
4909 #else
4910 static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
4911 {
4912 return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
4913 }
4914
4915 #endif
4916
4917 static inline void netif_addr_lock(struct net_device *dev)
4918 {
4919 unsigned char nest_level = 0;
4920
4921 #ifdef CONFIG_LOCKDEP
4922 nest_level = dev->nested_level;
4923 #endif
4924 spin_lock_nested(&dev->addr_list_lock, nest_level);
4925 }
4926
4927 static inline void netif_addr_lock_bh(struct net_device *dev)
4928 {
4929 unsigned char nest_level = 0;
4930
4931 #ifdef CONFIG_LOCKDEP
4932 nest_level = dev->nested_level;
4933 #endif
4934 local_bh_disable();
4935 spin_lock_nested(&dev->addr_list_lock, nest_level);
4936 }
4937
4938 static inline void netif_addr_unlock(struct net_device *dev)
4939 {
4940 spin_unlock(&dev->addr_list_lock);
4941 }
4942
4943 static inline void netif_addr_unlock_bh(struct net_device *dev)
4944 {
4945 spin_unlock_bh(&dev->addr_list_lock);
4946 }
4947
4948 /*
4949 * dev_addrs walker. Should be used only for read access. Call with
4950 * rcu_read_lock held.
4951 */
4952 #define for_each_dev_addr(dev, ha) \
4953 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
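
/*
 * A minimal read-only walk (the counter is illustrative):
 *
 *	struct netdev_hw_addr *ha;
 *	unsigned int n = 0;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		n++;
 *	rcu_read_unlock();
 */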
4954
4955 /* These functions live elsewhere (drivers/net/net_init.c), but are related. */
4956
4957 void ether_setup(struct net_device *dev);
4958
4959 /* Allocate dummy net_device */
4960 struct net_device *alloc_netdev_dummy(int sizeof_priv);
4961
4962 /* Support for loadable net-drivers */
4963 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4964 unsigned char name_assign_type,
4965 void (*setup)(struct net_device *),
4966 unsigned int txqs, unsigned int rxqs);
4967 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4968 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4969
4970 #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4971 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4972 count)
4973
4974 int register_netdev(struct net_device *dev);
4975 void unregister_netdev(struct net_device *dev);
4976
4977 int devm_register_netdev(struct device *dev, struct net_device *ndev);
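
/*
 * A minimal allocate/register sketch, assuming a hypothetical driver
 * private struct my_priv, an illustrative "myeth%d" name template, and
 * Ethernet-style initialization via ether_setup():
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d",
 *			   NET_NAME_ENUM, ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */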
4978
4979 /* General hardware address lists handling functions */
4980 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4981 struct netdev_hw_addr_list *from_list, int addr_len);
4982 int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
4983 struct netdev_hw_addr_list *from_list,
4984 int addr_len);
4985 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4986 struct netdev_hw_addr_list *from_list, int addr_len);
4987 int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4988 struct net_device *dev,
4989 int (*sync)(struct net_device *, const unsigned char *),
4990 int (*unsync)(struct net_device *,
4991 const unsigned char *));
4992 int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4993 struct net_device *dev,
4994 int (*sync)(struct net_device *,
4995 const unsigned char *, int),
4996 int (*unsync)(struct net_device *,
4997 const unsigned char *, int));
4998 void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4999 struct net_device *dev,
5000 int (*unsync)(struct net_device *,
5001 const unsigned char *, int));
5002 void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
5003 struct net_device *dev,
5004 int (*unsync)(struct net_device *,
5005 const unsigned char *));
5006 void __hw_addr_init(struct netdev_hw_addr_list *list);
5007
5008 /* Functions used for device addresses handling */
5009 void dev_addr_mod(struct net_device *dev, unsigned int offset,
5010 const void *addr, size_t len);
5011
5012 static inline void
5013 __dev_addr_set(struct net_device *dev, const void *addr, size_t len)
5014 {
5015 dev_addr_mod(dev, 0, addr, len);
5016 }
5017
5018 static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
5019 {
5020 __dev_addr_set(dev, addr, dev->addr_len);
5021 }
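
/*
 * A minimal sketch: installing an address read from hardware. The
 * buffer name hw_mac and my_read_mac_from_nvm() are hypothetical;
 * dev_addr_set() copies dev->addr_len bytes, so the buffer must be at
 * least that large:
 *
 *	u8 hw_mac[ETH_ALEN];
 *
 *	my_read_mac_from_nvm(dev, hw_mac);
 *	dev_addr_set(dev, hw_mac);
 */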
5022
5023 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
5024 unsigned char addr_type);
5025 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
5026 unsigned char addr_type);
5027
5028 /* Functions used for unicast addresses handling */
5029 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
5030 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
5031 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
5032 int dev_uc_sync(struct net_device *to, struct net_device *from);
5033 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
5034 void dev_uc_unsync(struct net_device *to, struct net_device *from);
5035 void dev_uc_flush(struct net_device *dev);
5036 void dev_uc_init(struct net_device *dev);
5037
5038 /**
5039 * __dev_uc_sync - Synchronize device's unicast list
5040 * @dev: device to sync
5041 * @sync: function to call if address should be added
5042 * @unsync: function to call if address should be removed
5043 *
5044 * Add newly added addresses to the interface, and release
5045 * addresses that have been deleted.
5046 */
5047 static inline int __dev_uc_sync(struct net_device *dev,
5048 int (*sync)(struct net_device *,
5049 const unsigned char *),
5050 int (*unsync)(struct net_device *,
5051 const unsigned char *))
5052 {
5053 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
5054 }
5055
5056 /**
5057 * __dev_uc_unsync - Remove synchronized addresses from device
5058 * @dev: device to sync
5059 * @unsync: function to call if address should be removed
5060 *
5061 * Remove all addresses that were added to the device by dev_uc_sync().
5062 */
5063 static inline void __dev_uc_unsync(struct net_device *dev,
5064 int (*unsync)(struct net_device *,
5065 const unsigned char *))
5066 {
5067 __hw_addr_unsync_dev(&dev->uc, dev, unsync);
5068 }
5069
5070 /* Functions used for multicast addresses handling */
5071 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
5072 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
5073 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
5074 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
5075 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
5076 int dev_mc_sync(struct net_device *to, struct net_device *from);
5077 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
5078 void dev_mc_unsync(struct net_device *to, struct net_device *from);
5079 void dev_mc_flush(struct net_device *dev);
5080 void dev_mc_init(struct net_device *dev);
5081
5082 /**
5083 * __dev_mc_sync - Synchronize device's multicast list
5084 * @dev: device to sync
5085 * @sync: function to call if address should be added
5086 * @unsync: function to call if address should be removed
5087 *
5088 * Add newly added addresses to the interface, and release
5089 * addresses that have been deleted.
5090 */
5091 static inline int __dev_mc_sync(struct net_device *dev,
5092 int (*sync)(struct net_device *,
5093 const unsigned char *),
5094 int (*unsync)(struct net_device *,
5095 const unsigned char *))
5096 {
5097 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
5098 }
5099
5100 /**
5101 * __dev_mc_unsync - Remove synchronized addresses from device
5102 * @dev: device to sync
5103 * @unsync: function to call if address should be removed
5104 *
5105 * Remove all addresses that were added to the device by dev_mc_sync().
5106 */
5107 static inline void __dev_mc_unsync(struct net_device *dev,
5108 int (*unsync)(struct net_device *,
5109 const unsigned char *))
5110 {
5111 __hw_addr_unsync_dev(&dev->mc, dev, unsync);
5112 }
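
/*
 * A minimal ndo_set_rx_mode() sketch: my_sync_addr()/my_unsync_addr()
 * are hypothetical driver callbacks that program the hardware filter:
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, my_sync_addr, my_unsync_addr);
 *		__dev_mc_sync(dev, my_sync_addr, my_unsync_addr);
 *	}
 */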
5113
5114 /* Functions used for secondary unicast and multicast support */
5115 void dev_set_rx_mode(struct net_device *dev);
5116 int netif_set_promiscuity(struct net_device *dev, int inc);
5117 int dev_set_promiscuity(struct net_device *dev, int inc);
5118 int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
5119 int dev_set_allmulti(struct net_device *dev, int inc);
5120 void netif_state_change(struct net_device *dev);
5121 void netdev_state_change(struct net_device *dev);
5122 void __netdev_notify_peers(struct net_device *dev);
5123 void netdev_notify_peers(struct net_device *dev);
5124 void netdev_features_change(struct net_device *dev);
5125 /* Load a device via the kmod */
5126 void dev_load(struct net *net, const char *name);
5127 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5128 struct rtnl_link_stats64 *storage);
5129 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5130 const struct net_device_stats *netdev_stats);
5131 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
5132 const struct pcpu_sw_netstats __percpu *netstats);
5133 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
5134
5135 enum {
5136 NESTED_SYNC_IMM_BIT,
5137 NESTED_SYNC_TODO_BIT,
5138 };
5139
5140 #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
5141 #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
5142
5143 #define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
5144 #define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
5145
5146 struct netdev_nested_priv {
5147 unsigned char flags;
5148 void *data;
5149 };
5150
5151 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
5152 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5153 struct list_head **iter);
5154
5155 /* iterate through upper list, must be called under RCU read lock */
5156 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
5157 for (iter = &(dev)->adj_list.upper, \
5158 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
5159 updev; \
5160 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
5161
5162 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
5163 int (*fn)(struct net_device *upper_dev,
5164 struct netdev_nested_priv *priv),
5165 struct netdev_nested_priv *priv);
5166
5167 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
5168 struct net_device *upper_dev);
5169
5170 bool netdev_has_any_upper_dev(struct net_device *dev);
5171
5172 void *netdev_lower_get_next_private(struct net_device *dev,
5173 struct list_head **iter);
5174 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5175 struct list_head **iter);
5176
5177 #define netdev_for_each_lower_private(dev, priv, iter) \
5178 for (iter = (dev)->adj_list.lower.next, \
5179 priv = netdev_lower_get_next_private(dev, &(iter)); \
5180 priv; \
5181 priv = netdev_lower_get_next_private(dev, &(iter)))
5182
5183 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
5184 for (iter = &(dev)->adj_list.lower, \
5185 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
5186 priv; \
5187 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
5188
5189 void *netdev_lower_get_next(struct net_device *dev,
5190 struct list_head **iter);
5191
5192 #define netdev_for_each_lower_dev(dev, ldev, iter) \
5193 for (iter = (dev)->adj_list.lower.next, \
5194 ldev = netdev_lower_get_next(dev, &(iter)); \
5195 ldev; \
5196 ldev = netdev_lower_get_next(dev, &(iter)))
5197
5198 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5199 struct list_head **iter);
5200 int netdev_walk_all_lower_dev(struct net_device *dev,
5201 int (*fn)(struct net_device *lower_dev,
5202 struct netdev_nested_priv *priv),
5203 struct netdev_nested_priv *priv);
5204 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
5205 int (*fn)(struct net_device *lower_dev,
5206 struct netdev_nested_priv *priv),
5207 struct netdev_nested_priv *priv);
5208
5209 void *netdev_adjacent_get_private(struct list_head *adj_list);
5210 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
5211 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
5212 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
5213 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
5214 struct netlink_ext_ack *extack);
5215 int netdev_master_upper_dev_link(struct net_device *dev,
5216 struct net_device *upper_dev,
5217 void *upper_priv, void *upper_info,
5218 struct netlink_ext_ack *extack);
5219 void netdev_upper_dev_unlink(struct net_device *dev,
5220 struct net_device *upper_dev);
5221 int netdev_adjacent_change_prepare(struct net_device *old_dev,
5222 struct net_device *new_dev,
5223 struct net_device *dev,
5224 struct netlink_ext_ack *extack);
5225 void netdev_adjacent_change_commit(struct net_device *old_dev,
5226 struct net_device *new_dev,
5227 struct net_device *dev);
5228 void netdev_adjacent_change_abort(struct net_device *old_dev,
5229 struct net_device *new_dev,
5230 struct net_device *dev);
5231 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
5232 void *netdev_lower_dev_get_private(struct net_device *dev,
5233 struct net_device *lower_dev);
5234 void netdev_lower_state_changed(struct net_device *lower_dev,
5235 void *lower_state_info);
5236
5237 #define NETDEV_RSS_KEY_LEN 256
5238 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
5239 void netdev_rss_key_fill(void *buffer, size_t len);
5240
5241 int skb_checksum_help(struct sk_buff *skb);
5242 int skb_crc32c_csum_help(struct sk_buff *skb);
5243 int skb_csum_hwoffload_help(struct sk_buff *skb,
5244 const netdev_features_t features);
5245
5246 struct netdev_bonding_info {
5247 ifslave slave;
5248 ifbond master;
5249 };
5250
5251 struct netdev_notifier_bonding_info {
5252 struct netdev_notifier_info info; /* must be first */
5253 struct netdev_bonding_info bonding_info;
5254 };
5255
5256 void netdev_bonding_info_change(struct net_device *dev,
5257 struct netdev_bonding_info *bonding_info);
5258
5259 #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
5260 void ethtool_notify(struct net_device *dev, unsigned int cmd);
5261 #else
5262 static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
5263 {
5264 }
5265 #endif
5266
5267 __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
5268
5269 static inline bool can_checksum_protocol(netdev_features_t features,
5270 __be16 protocol)
5271 {
5272 if (protocol == htons(ETH_P_FCOE))
5273 return !!(features & NETIF_F_FCOE_CRC);
5274
5275 /* Assume this is an IP checksum (not SCTP CRC) */
5276
5277 if (features & NETIF_F_HW_CSUM) {
5278 /* Can checksum everything */
5279 return true;
5280 }
5281
5282 switch (protocol) {
5283 case htons(ETH_P_IP):
5284 return !!(features & NETIF_F_IP_CSUM);
5285 case htons(ETH_P_IPV6):
5286 return !!(features & NETIF_F_IPV6_CSUM);
5287 default:
5288 return false;
5289 }
5290 }
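
/*
 * A minimal sketch of the usual software-fallback decision on
 * transmit:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !can_checksum_protocol(dev->features, skb->protocol))
 *		skb_checksum_help(skb);	// compute the checksum in software
 */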
5291
5292 #ifdef CONFIG_BUG
5293 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
5294 #else
5295 static inline void netdev_rx_csum_fault(struct net_device *dev,
5296 struct sk_buff *skb)
5297 {
5298 }
5299 #endif
5300 /* rx skb timestamps */
5301 void net_enable_timestamp(void);
5302 void net_disable_timestamp(void);
5303
5304 static inline ktime_t netdev_get_tstamp(struct net_device *dev,
5305 const struct skb_shared_hwtstamps *hwtstamps,
5306 bool cycles)
5307 {
5308 const struct net_device_ops *ops = dev->netdev_ops;
5309
5310 if (ops->ndo_get_tstamp)
5311 return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
5312
5313 return hwtstamps->hwtstamp;
5314 }
5315
5316 #ifndef CONFIG_PREEMPT_RT
5317 static inline void netdev_xmit_set_more(bool more)
5318 {
5319 __this_cpu_write(softnet_data.xmit.more, more);
5320 }
5321
5322 static inline bool netdev_xmit_more(void)
5323 {
5324 return __this_cpu_read(softnet_data.xmit.more);
5325 }
5326 #else
5327 static inline void netdev_xmit_set_more(bool more)
5328 {
5329 current->net_xmit.more = more;
5330 }
5331
5332 static inline bool netdev_xmit_more(void)
5333 {
5334 return current->net_xmit.more;
5335 }
5336 #endif
5337
5338 static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
5339 struct sk_buff *skb, struct net_device *dev,
5340 bool more)
5341 {
5342 netdev_xmit_set_more(more);
5343 return ops->ndo_start_xmit(skb, dev);
5344 }
5345
5346 static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
5347 struct netdev_queue *txq, bool more)
5348 {
5349 const struct net_device_ops *ops = dev->netdev_ops;
5350 netdev_tx_t rc;
5351
5352 rc = __netdev_start_xmit(ops, skb, dev, more);
5353 if (rc == NETDEV_TX_OK)
5354 txq_trans_update(dev, txq);
5355
5356 return rc;
5357 }
5358
5359 int netdev_class_create_file_ns(const struct class_attribute *class_attr,
5360 const struct ns_common *ns);
5361 void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
5362 const struct ns_common *ns);
5363
5364 extern const struct kobj_ns_type_operations net_ns_type_operations;
5365
5366 const char *netdev_drivername(const struct net_device *dev);
5367
5368 static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
5369 netdev_features_t f2)
5370 {
5371 if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
5372 if (f1 & NETIF_F_HW_CSUM)
5373 f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5374 else
5375 f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5376 }
5377
5378 return f1 & f2;
5379 }
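
/*
 * A minimal sketch for a stacked setup (upper and lower are
 * illustrative net_device pointers), computing features usable across
 * both layers while keeping NETIF_F_HW_CSUM equivalent to the
 * IP/IPv6-specific checksum bits:
 *
 *	netdev_features_t eff;
 *
 *	eff = netdev_intersect_features(upper->features, lower->features);
 */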
5380
5381 static inline netdev_features_t netdev_get_wanted_features(
5382 struct net_device *dev)
5383 {
5384 return (dev->features & ~dev->hw_features) | dev->wanted_features;
5385 }
5386 netdev_features_t netdev_increment_features(netdev_features_t all,
5387 netdev_features_t one, netdev_features_t mask);
5388
5389 /* Allow TSO to be used on stacked devices:
5390 * performing the GSO segmentation before the last device
5391 * is a performance improvement.
5392 */
5393 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
5394 netdev_features_t mask)
5395 {
5396 return netdev_increment_features(features, NETIF_F_ALL_TSO |
5397 NETIF_F_ALL_FOR_ALL, mask);
5398 }
5399
5400 int __netdev_update_features(struct net_device *dev);
5401 void netdev_update_features(struct net_device *dev);
5402 void netdev_change_features(struct net_device *dev);
5403 void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
5404
5405 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5406 struct net_device *dev);
5407
5408 netdev_features_t passthru_features_check(struct sk_buff *skb,
5409 struct net_device *dev,
5410 netdev_features_t features);
5411 netdev_features_t netif_skb_features(struct sk_buff *skb);
5412 void skb_warn_bad_offload(const struct sk_buff *skb);
5413
5414 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
5415 {
5416 netdev_features_t feature;
5417
5418 if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
5419 gso_type |= __SKB_GSO_TCP_FIXEDID;
5420
5421 feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
5422
5423 /* check flags correspondence */
5424 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
5425 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
5426 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
5427 BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
5428 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
5429 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
5430 BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
5431 BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
5432 BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
5433 BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
5434 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
5435 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
5436 BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
5437 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
5438 BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
5439 BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
5440 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
5441 BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
5442 BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
5443 BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
5444 (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
5445
5446 return (features & feature) == feature;
5447 }
5448
5449 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
5450 {
5451 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
5452 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
5453 }
5454
5455 static inline bool netif_needs_gso(struct sk_buff *skb,
5456 netdev_features_t features)
5457 {
5458 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
5459 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
5460 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
5461 }
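
/*
 * A minimal sketch of the decision the transmit path makes with this
 * helper: choose between handing a GSO skb to capable hardware or
 * segmenting it in software first.
 *
 *	netdev_features_t features = netif_skb_features(skb);
 *
 *	if (netif_needs_gso(skb, features))
 *		...	// segment in software before queueing
 */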
5462
5463 void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
5464 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
5465 void netif_inherit_tso_max(struct net_device *to,
5466 const struct net_device *from);
5467
5468 static inline unsigned int
5469 netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
5470 {
5471 /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
5472 return skb->protocol == htons(ETH_P_IPV6) ?
5473 READ_ONCE(dev->gro_max_size) :
5474 READ_ONCE(dev->gro_ipv4_max_size);
5475 }
5476
5477 static inline unsigned int
5478 netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
5479 {
5480 /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
5481 return skb->protocol == htons(ETH_P_IPV6) ?
5482 READ_ONCE(dev->gso_max_size) :
5483 READ_ONCE(dev->gso_ipv4_max_size);
5484 }
5485
5486 static inline bool netif_is_macsec(const struct net_device *dev)
5487 {
5488 return dev->priv_flags & IFF_MACSEC;
5489 }
5490
5491 static inline bool netif_is_macvlan(const struct net_device *dev)
5492 {
5493 return dev->priv_flags & IFF_MACVLAN;
5494 }
5495
5496 static inline bool netif_is_macvlan_port(const struct net_device *dev)
5497 {
5498 return dev->priv_flags & IFF_MACVLAN_PORT;
5499 }
5500
5501 static inline bool netif_is_bond_master(const struct net_device *dev)
5502 {
5503 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
5504 }
5505
5506 static inline bool netif_is_bond_slave(const struct net_device *dev)
5507 {
5508 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
5509 }
5510
5511 static inline bool netif_supports_nofcs(struct net_device *dev)
5512 {
5513 return dev->priv_flags & IFF_SUPP_NOFCS;
5514 }
5515
5516 static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5517 {
5518 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5519 }
5520
5521 static inline bool netif_is_l3_master(const struct net_device *dev)
5522 {
5523 return dev->priv_flags & IFF_L3MDEV_MASTER;
5524 }
5525
5526 static inline bool netif_is_l3_slave(const struct net_device *dev)
5527 {
5528 return dev->priv_flags & IFF_L3MDEV_SLAVE;
5529 }
5530
5531 static inline int dev_sdif(const struct net_device *dev)
5532 {
5533 #ifdef CONFIG_NET_L3_MASTER_DEV
5534 if (netif_is_l3_slave(dev))
5535 return dev->ifindex;
5536 #endif
5537 return 0;
5538 }
5539
5540 static inline bool netif_is_bridge_master(const struct net_device *dev)
5541 {
5542 return dev->priv_flags & IFF_EBRIDGE;
5543 }
5544
5545 static inline bool netif_is_bridge_port(const struct net_device *dev)
5546 {
5547 return dev->priv_flags & IFF_BRIDGE_PORT;
5548 }
5549
5550 static inline bool netif_is_ovs_master(const struct net_device *dev)
5551 {
5552 return dev->priv_flags & IFF_OPENVSWITCH;
5553 }
5554
5555 static inline bool netif_is_ovs_port(const struct net_device *dev)
5556 {
5557 return dev->priv_flags & IFF_OVS_DATAPATH;
5558 }
5559
5560 static inline bool netif_is_any_bridge_master(const struct net_device *dev)
5561 {
5562 return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
5563 }
5564
5565 static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5566 {
5567 return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5568 }
5569
5570 static inline bool netif_is_team_master(const struct net_device *dev)
5571 {
5572 return dev->priv_flags & IFF_TEAM;
5573 }
5574
5575 static inline bool netif_is_team_port(const struct net_device *dev)
5576 {
5577 return dev->priv_flags & IFF_TEAM_PORT;
5578 }
5579
5580 static inline bool netif_is_lag_master(const struct net_device *dev)
5581 {
5582 return netif_is_bond_master(dev) || netif_is_team_master(dev);
5583 }
5584
5585 static inline bool netif_is_lag_port(const struct net_device *dev)
5586 {
5587 return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5588 }
5589
5590 bool netif_is_rxfh_configured(const struct net_device *dev);
5591
5592 static inline bool netif_is_failover(const struct net_device *dev)
5593 {
5594 return dev->priv_flags & IFF_FAILOVER;
5595 }
5596
5597 static inline bool netif_is_failover_slave(const struct net_device *dev)
5598 {
5599 return dev->priv_flags & IFF_FAILOVER_SLAVE;
5600 }
5601
5602 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
5603 static inline void netif_keep_dst(struct net_device *dev)
5604 {
5605 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5606 }
5607
5608 /* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion. */
5609 static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5610 {
5611 /* TODO: reserve and use an additional IFF bit, if we get more users */
5612 return netif_is_macsec(dev);
5613 }
5614
5615 extern struct pernet_operations __net_initdata loopback_net_ops;
5616
5617 /* Logging, debugging and troubleshooting/diagnostic helpers. */
5618
5619 /* netdev_printk helpers, similar to dev_printk */
5620
5621 static inline const char *netdev_name(const struct net_device *dev)
5622 {
5623 if (!dev->name[0] || strchr(dev->name, '%'))
5624 return "(unnamed net_device)";
5625 return dev->name;
5626 }
5627
5628 static inline const char *netdev_reg_state(const struct net_device *dev)
5629 {
5630 u8 reg_state = READ_ONCE(dev->reg_state);
5631
5632 switch (reg_state) {
5633 case NETREG_UNINITIALIZED: return " (uninitialized)";
5634 case NETREG_REGISTERED: return "";
5635 case NETREG_UNREGISTERING: return " (unregistering)";
5636 case NETREG_UNREGISTERED: return " (unregistered)";
5637 case NETREG_RELEASED: return " (released)";
5638 case NETREG_DUMMY: return " (dummy)";
5639 }
5640
5641 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
5642 return " (unknown)";
5643 }
5644
5645 #define MODULE_ALIAS_NETDEV(device) \
5646 MODULE_ALIAS("netdev-" device)
5647
5648 /*
5649 * netdev_WARN() acts like dev_printk(), but with the key difference
5650 * of using a WARN/WARN_ON to get the message out, including the
5651 * file/line information and a backtrace.
5652 */
5653 #define netdev_WARN(dev, format, args...) \
5654 WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \
5655 netdev_reg_state(dev), ##args)
5656
5657 #define netdev_WARN_ONCE(dev, format, args...) \
5658 WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
5659 netdev_reg_state(dev), ##args)
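
/*
 * A minimal sketch (the message and the "state" variable are
 * illustrative):
 *
 *	netdev_WARN_ONCE(dev, "unexpected descriptor state %u\n", state);
 */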
5660
5661 /*
5662 * The list of packet types we will receive (as opposed to discard)
5663 * and the routines to invoke.
5664 *
5665 * Why 16? Because with 16 the only overlap we get on a hash of the
5666 * low nibble of the protocol value is RARP/SNAP/X.25.
5667 *
5668 * 0800 IP
5669 * 0001 802.3
5670 * 0002 AX.25
5671 * 0004 802.2
5672 * 8035 RARP
5673 * 0005 SNAP
5674 * 0805 X.25
5675 * 0806 ARP
5676 * 8137 IPX
5677 * 0009 Localtalk
5678 * 86DD IPv6
5679 */
5680 #define PTYPE_HASH_SIZE (16)
5681 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
5682
5683 extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
5684
5685 extern struct net_device *blackhole_netdev;
5686
5687 /* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
5688 #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
5689 #define DEV_STATS_ADD(DEV, FIELD, VAL) \
5690 atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
5691 #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
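
/*
 * A minimal slow-path sketch; hot paths should prefer per-cpu or
 * per-queue counters as the note above says:
 *
 *	DEV_STATS_INC(dev, tx_dropped);
 *	DEV_STATS_ADD(dev, rx_bytes, skb->len);
 */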
5692
5693 #endif /* _LINUX_NETDEVICE_H */
5694