Lines matching +full:foo +full:- +full:queue (excerpts from include/linux/netdevice.h)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
87 * - qdisc return codes
88 * - driver transmit return codes
89 * - errno values
93 * the driver transmit return codes though - when qdiscs are used, the actual
100 /* qdisc ->enqueue() return codes. */
110 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
130 * - successful transmission (rc == NETDEV_TX_OK) in dev_xmit_complete()
131 * - error while transmitting (rc < 0) in dev_xmit_complete()
132 * - error while queueing to a different device (rc & NET_XMIT_MASK) in dev_xmit_complete()
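A minimal sketch of how a caller might classify a value coming back through this path. foo_send_one() is a hypothetical helper; dev_queue_xmit() and the net_xmit_errno() macro above are the real entry points.

/* Hedged sketch: classifying a transmit return code. */
#include <linux/netdevice.h>

static int foo_send_one(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	if (rc < 0)
		return rc;			/* hard error from the stack */
	if (rc & NET_XMIT_MASK)
		return net_xmit_errno(rc);	/* DROP -> -ENOBUFS, CN -> 0 */
	return 0;				/* NET_XMIT_SUCCESS */
}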
141 * Compute the worst-case header length according to the protocols
229 #define netdev_hw_addr_list_count(l) ((l)->count)
232 list_for_each_entry(ha, &(l)->list, list)
234 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
235 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
237 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
239 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
240 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
242 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
251 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
253 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
257 /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
259 * dev->hard_header_len ? (dev->hard_header_len +
260 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
266 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
268 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
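A short sketch of how these reservation macros are typically used when allocating an skb for transmission. LL_RESERVED_SPACE() is the macro above; foo_alloc_skb() and the payload length are assumptions.

/* Hedged sketch: reserve device headroom before building a packet. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *foo_alloc_skb(struct net_device *dev, unsigned int len)
{
	int hlen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb;

	skb = alloc_skb(hlen + len + dev->needed_tailroom, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Leave room for the hard header that dev_hard_header() will push. */
	skb_reserve(skb, hlen);
	skb->dev = dev;
	return skb;
}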
299 * This structure holds boot-time configured netdevice settings. They
328 * to the per-CPU poll_list, and whoever clears that bit
356 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
383 * enum rx_handler_result - Possible return values for rx_handlers.
387 * case skb->dev was changed by rx_handler.
395 * to register a second rx_handler will return -EBUSY.
408 * If the rx_handler changed skb->dev, to divert the skb to another
414 * are registered on exact device (ptype->dev == skb->dev).
416 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
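A hedged sketch of the registration pattern this enum documents: an upper device attaches a handler to a port and diverts every frame. netdev_rx_handler_register() is the real call; foo_handle_frame() and foo_enslave() are assumptions.

/* Hedged sketch: an rx_handler that steals frames for an upper device. */
#include <linux/netdevice.h>

static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

	skb->dev = upper;		/* divert to the upper device ...	*/
	return RX_HANDLER_ANOTHER;	/* ... and let delivery re-run		*/
}

static int foo_enslave(struct net_device *upper, struct net_device *port)
{
	/* Only one handler may be attached per device; -EBUSY otherwise. */
	return netdev_rx_handler_register(port, foo_handle_frame, upper);
}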
437 return test_bit(NAPI_STATE_DISABLE, &n->state); in napi_disable_pending()
443 * napi_schedule - schedule NAPI poll
456 * napi_schedule_irqoff - schedule NAPI poll
467 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
479 * napi_complete - NAPI processing complete
492 * napi_disable - prevent NAPI from scheduling
501 * napi_enable - enable NAPI scheduling
509 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); in napi_enable()
511 clear_bit(NAPI_STATE_SCHED, &n->state); in napi_enable()
512 clear_bit(NAPI_STATE_NPSVC, &n->state); in napi_enable()
516 * napi_synchronize - wait until NAPI is not running
526 while (test_bit(NAPI_STATE_SCHED, &n->state)) in napi_synchronize()
533 * napi_if_scheduled_mark_missed - if napi is running, set the
545 val = READ_ONCE(n->state); in napi_if_scheduled_mark_missed()
553 } while (cmpxchg(&n->state, val, new) != val); in napi_if_scheduled_mark_missed()
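A sketch of the interrupt-to-poll handshake the helpers above implement. The foo_* names, struct foo_priv, and the register accessors are assumptions; napi_schedule() and napi_complete_done() are the documented API.

/* Hedged sketch: NAPI scheduling from an ISR and completion in poll(). */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {			/* hypothetical driver private */
	struct napi_struct napi;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_priv *priv = data;

	foo_disable_rx_irq(priv);	/* assumed hardware helper */
	napi_schedule(&priv->napi);	/* sets SCHED, raises NET_RX softirq */
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int done = foo_rx_clean(priv, budget);	/* assumed: at most budget frames */

	if (done < budget && napi_complete_done(napi, done))
		foo_enable_rx_irq(priv);	/* only re-arm once NAPI is done */
	return done;
}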
575 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
578 * queue independently. The netif_xmit_*stopped functions below are called
579 * to check if the queue has been stopped by the driver or stack (either
586 * read-mostly part
599 * Number of TX timeouts for this queue
604 /* Subordinate device that the queue has been assigned to */
610 * write-mostly part
644 return q->numa_node; in netdev_queue_numa_node_read()
653 q->numa_node = node; in netdev_queue_numa_node_write()
671 * tail pointer for that CPU's input queue at the time of last enqueue, and
695 * Each entry is a 32bit value. Upper part is the high-order bits
698 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
700 * meaning we use 32-6=26 bits for the hash.
718 unsigned int index = hash & table->mask; in rps_record_sock_flow()
724 if (table->ents[index] != val) in rps_record_sock_flow()
725 table->ents[index] = val; in rps_record_sock_flow()
735 /* This structure contains an instance of an RX queue. */
750 * RX queue sysfs structures and functions.
754 ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
755 ssize_t (*store)(struct netdev_rx_queue *queue,
772 - sizeof(struct xps_map)) / sizeof(u16))
828 return a->id_len == b->id_len && in netdev_phys_item_id_same()
829 memcmp(a->id, b->id, a->id_len) == 0; in netdev_phys_item_id_same()
980 * the queue before that can happen; it's for obsolete devices and weird
981 * corner cases, but the stack really does a non-trivial amount
997 * Called to decide which queue to use when device supports multiple
1033 * for dev->watchdog ticks.
1040 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1043 * (which should normally be dev->stats) and return a pointer to
1046 * 3. Update dev->stats asynchronously and atomically, and define
1067 * SR-IOV management functions.
1090 * tx queues stopped. This allows the netdevice to perform queue
1101 * so the underlying device can perform whatever needed clean-ups to
1128 * FC-GS Fabric Device Management Information (FDMI) specification.
1139 * Set hardware filter for RFS. rxq_index is the target queue index;
1159 * Adjusts the requested feature flags according to device-specific
1166 * Must return >0 or -errno if it changed dev->features itself.
1192 * Called to change device carrier. Soft-devices (like dummy, team, etc)
1196 * network cables) or protocol-dependent mechanisms (eg
1236 * Called when a user wants to set a max-rate limitation of specific
1237 * TX queue.
1265 * no frames were xmit'ed and core-caller will free all frames.
1269 * queue id bound to an AF_XDP socket. The flags field specifies if
1332 int queue, u8 *mac);
1334 int queue, u16 vlan,
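A minimal sketch of a net_device_ops table wiring up a few of the callbacks documented above. All foo_* handlers are assumptions; eth_validate_addr() and eth_mac_addr() are the generic Ethernet helpers.

/* Hedged sketch: a minimal net_device_ops. Assigned via dev->netdev_ops. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,		/* bring queues/IRQs up */
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,	/* must return NETDEV_TX_* */
	.ndo_get_stats64	= foo_get_stats64,	/* fills zero-initialised stats */
	.ndo_set_rx_mode	= foo_set_rx_mode,	/* sync uc/mc filter lists */
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};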
1493 * enum net_device_priv_flags - &struct net_device priv_flags
1508 * release skb->dst
1510 * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1604 * struct net_device - The DEVICE structure.
1607 * data with strictly "high-level" data, and it has to know about
1627 * @ptype_all: Device-specific packet handlers for all protocols
1628 * @ptype_specific: Device-specific, protocol-specific packet handlers
1632 * @hw_features: User-changeable features
1634 * @wanted_features: User-requested features
1734 * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1758 * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1759 * indexed by RX queue number. Assigned by driver.
1768 * @tx_queue_len: Max frames per queue allowed
1770 * @xdp_bulkq: XDP device bulk queue
1797 * @ml_priv: Mid-layer private
1810 * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1829 * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1830 * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1836 * @wol_enabled: Wake-on-LAN is enabled
1838 * @net_notifier_list: List of per-net netdev notifier block
1850 * dev->addr_list_lock.
1947 /* Note : dev->mtu is often read without holding a lock.
1993 /* Protocol-specific pointers */
2074 /* These may be needed for future network-power-down code. */
2109 /* mid-layer private */
2172 if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) in netif_elide_gro()
2182 return dev->prio_tc_map[prio & TC_BITMASK]; in netdev_get_prio_tc_map()
2188 if (tc >= dev->num_tc) in netdev_set_prio_tc_map()
2189 return -EINVAL; in netdev_set_prio_tc_map()
2191 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; in netdev_set_prio_tc_map()
2203 return dev->num_tc; in netdev_get_num_tc()
2230 return max_t(int, -dev->num_tc, 0); in netdev_get_sb_channel()
2237 return &dev->_tx[index]; in netdev_get_tx_queue()
2254 for (i = 0; i < dev->num_tx_queues; i++) in netdev_for_each_tx_queue()
2255 f(dev, &dev->_tx[i], arg); in netdev_for_each_tx_queue()
2266 (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2267 (dev)->qdisc_running_key = &qdisc_running_key; \
2268 lockdep_set_class(&(dev)->addr_list_lock, \
2270 for (i = 0; i < (dev)->num_tx_queues; i++) \
2271 lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2286 return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; in netdev_get_fwd_headroom()
2291 if (dev->netdev_ops->ndo_set_rx_headroom) in netdev_set_rx_headroom()
2292 dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); in netdev_set_rx_headroom()
2298 netdev_set_rx_headroom(dev, -1); in netdev_reset_rx_headroom()
2307 return read_pnet(&dev->nd_net); in dev_net()
2313 write_pnet(&dev->nd_net, net); in dev_net_set()
2317 * netdev_priv - access network device private data
2330 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2333 * fine-grained identification of different network device types. For
2336 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
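A hedged sketch of a PCI probe path using the two macros above so the netdev appears under the right parent and device type in sysfs. The foo_* names and the device_type are assumptions; foo_netdev_ops is the table from the earlier sketch.

/* Hedged sketch: probe-time setup with SET_NETDEV_DEV/SET_NETDEV_DEVTYPE. */
#include <linux/etherdevice.h>
#include <linux/pci.h>

static const struct device_type foo_type = { .name = "foo" };	/* assumed type */

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));

	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);	/* parent device for sysfs */
	SET_NETDEV_DEVTYPE(dev, &foo_type);	/* fine-grained device type */
	dev->netdev_ops = &foo_netdev_ops;

	return register_netdev(dev);
}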
2344 * netif_napi_add - initialize a NAPI context
2351 * *any* of the other NAPI-related functions.
2357 * netif_tx_napi_add - initialize a NAPI context
2364 * to exclusively poll a TX queue.
2372 set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); in netif_tx_napi_add()
2377 * __netif_napi_del - remove a NAPI context
2387 * netif_napi_del - remove a NAPI context
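A sketch of the registration lifecycle these helpers describe, reusing the hypothetical foo_priv/foo_poll from the earlier NAPI sketch.

/* Hedged sketch: register a NAPI context, enable it on open, quiesce on stop. */
static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
{
	/* Must run before any other NAPI-related function touches the context. */
	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);	/* clears SCHED, allows scheduling */
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	napi_disable(&priv->napi);	/* waits for an in-flight poll to finish */
	return 0;
}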
2399 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2405 /* This indicates where we are processing relative to skb->data. */
2408 /* This is non-zero if the packet cannot be merged with the new skb. */
2423 /* Used in ipv6_gro_receive() and foo-over-udp */
2426 /* This is non-zero if the packet may be of the same flow. */
2443 /* Used in foo-over-udp, set in udp[46]_gro_receive */
2465 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2470 return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; in gro_recursion_inc_test()
2479 NAPI_GRO_CB(skb)->flush |= 1; in call_gro_receive()
2494 NAPI_GRO_CB(skb)->flush |= 1; in call_gro_receive_sk()
2533 /* often modified stats are per-CPU, other are shared (netdev->stats) */
2552 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); in dev_sw_netstats_rx_add()
2554 u64_stats_update_begin(&tstats->syncp); in dev_sw_netstats_rx_add()
2555 tstats->rx_bytes += len; in dev_sw_netstats_rx_add()
2556 tstats->rx_packets++; in dev_sw_netstats_rx_add()
2557 u64_stats_update_end(&tstats->syncp); in dev_sw_netstats_rx_add()
2562 struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); in dev_lstats_add()
2564 u64_stats_update_begin(&lstats->syncp); in dev_lstats_add()
2565 u64_stats_add(&lstats->bytes, len); in dev_lstats_add()
2566 u64_stats_inc(&lstats->packets); in dev_lstats_add()
2567 u64_stats_update_end(&lstats->syncp); in dev_lstats_add()
2578 u64_stats_init(&stat->syncp); \
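A sketch of the per-CPU software stats pattern above, as a blackhole-style virtual device might use it. foo_xmit()/foo_dev_init() are assumptions; dev_lstats_add() and netdev_alloc_pcpu_stats() are the helpers shown above.

/* Hedged sketch: per-CPU lstats accounting in a virtual device's xmit path. */
#include <linux/netdevice.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	dev_lstats_add(dev, len);	/* bytes + one packet, u64_stats-protected */
	consume_skb(skb);		/* blackhole-style: count, then drop */
	return NETDEV_TX_OK;
}

static int foo_dev_init(struct net_device *dev)
{
	dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
	return dev->lstats ? 0 : -ENOMEM;
}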
2627 - we can use this e.g. to kick tcp sessions
2713 info->dev = dev; in netdev_notifier_info_init()
2714 info->extack = NULL; in netdev_notifier_info_init()
2720 return info->dev; in netdev_notifier_info_to_dev()
2726 return info->extack; in netdev_notifier_info_to_extack()
2735 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2737 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2739 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2741 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2743 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2745 list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2748 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2760 lh = dev->dev_list.next; in next_net_device()
2761 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device()
2770 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); in next_net_device_rcu()
2771 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in next_net_device_rcu()
2776 return list_empty(&net->dev_base_head) ? NULL : in first_net_device()
2777 net_device_entry(net->dev_base_head.next); in first_net_device()
2782 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); in first_net_device_rcu()
2784 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); in first_net_device_rcu()
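A short sketch of walking a namespace's device list under RCU with the iterators built on dev_base_head above. foo_count_running() is a hypothetical helper.

/* Hedged sketch: RCU-protected walk over all netdevs in a namespace. */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static unsigned int foo_count_running(struct net *net)
{
	struct net_device *dev;
	unsigned int n = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (netif_running(dev))
			n++;
	rcu_read_unlock();

	return n;
}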
2858 return NAPI_GRO_CB(skb)->data_offset; in skb_gro_offset()
2863 return skb->len - NAPI_GRO_CB(skb)->data_offset; in skb_gro_len()
2868 NAPI_GRO_CB(skb)->data_offset += len; in skb_gro_pull()
2874 return NAPI_GRO_CB(skb)->frag0 + offset; in skb_gro_header_fast()
2879 return NAPI_GRO_CB(skb)->frag0_len < hlen; in skb_gro_header_hard()
2884 NAPI_GRO_CB(skb)->frag0 = NULL; in skb_gro_frag0_invalidate()
2885 NAPI_GRO_CB(skb)->frag0_len = 0; in skb_gro_frag0_invalidate()
2895 return skb->data + offset; in skb_gro_header_slow()
2900 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + in skb_gro_network_header()
2907 if (NAPI_GRO_CB(skb)->csum_valid) in skb_gro_postpull_rcsum()
2908 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, in skb_gro_postpull_rcsum()
2921 return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); in skb_at_gro_remcsum_start()
2928 return ((skb->ip_summed != CHECKSUM_PARTIAL || in __skb_gro_checksum_validate_needed()
2932 NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_validate_needed()
2939 if (NAPI_GRO_CB(skb)->csum_valid && in __skb_gro_checksum_validate_complete()
2940 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) in __skb_gro_checksum_validate_complete()
2943 NAPI_GRO_CB(skb)->csum = psum; in __skb_gro_checksum_validate_complete()
2950 if (NAPI_GRO_CB(skb)->csum_cnt > 0) { in skb_gro_incr_csum_unnecessary()
2952 NAPI_GRO_CB(skb)->csum_cnt--; in skb_gro_incr_csum_unnecessary()
2986 return (NAPI_GRO_CB(skb)->csum_cnt == 0 && in __skb_gro_checksum_convert_check()
2987 !NAPI_GRO_CB(skb)->csum_valid); in __skb_gro_checksum_convert_check()
2993 NAPI_GRO_CB(skb)->csum = ~pseudo; in __skb_gro_checksum_convert()
2994 NAPI_GRO_CB(skb)->csum_valid = 1; in __skb_gro_checksum_convert()
3011 grc->offset = 0; in skb_gro_remcsum_init()
3012 grc->delta = 0; in skb_gro_remcsum_init()
3024 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); in skb_gro_remcsum_process()
3027 NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; in skb_gro_remcsum_process()
3038 delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, in skb_gro_remcsum_process()
3041 /* Adjust skb->csum since we changed the packet */ in skb_gro_remcsum_process()
3042 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); in skb_gro_remcsum_process()
3044 grc->offset = off + hdrlen + offset; in skb_gro_remcsum_process()
3045 grc->delta = delta; in skb_gro_remcsum_process()
3054 size_t plen = grc->offset + sizeof(u16); in skb_gro_remcsum_cleanup()
3056 if (!grc->delta) in skb_gro_remcsum_cleanup()
3059 ptr = skb_gro_header_fast(skb, grc->offset); in skb_gro_remcsum_cleanup()
3060 if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { in skb_gro_remcsum_cleanup()
3061 ptr = skb_gro_header_slow(skb, plen, grc->offset); in skb_gro_remcsum_cleanup()
3066 remcsum_unadjust((__sum16 *)ptr, grc->delta); in skb_gro_remcsum_cleanup()
3072 if (PTR_ERR(pp) != -EINPROGRESS) in skb_gro_flush_final()
3073 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
3080 if (PTR_ERR(pp) != -EINPROGRESS) { in skb_gro_flush_final_remcsum()
3081 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
3083 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
3089 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final()
3096 NAPI_GRO_CB(skb)->flush |= flush; in skb_gro_flush_final_remcsum()
3098 skb->remcsum_offload = 0; in skb_gro_flush_final_remcsum()
3107 if (!dev->header_ops || !dev->header_ops->create) in dev_hard_header()
3110 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); in dev_hard_header()
3116 const struct net_device *dev = skb->dev; in dev_parse_header()
3118 if (!dev->header_ops || !dev->header_ops->parse) in dev_parse_header()
3120 return dev->header_ops->parse(skb, haddr); in dev_parse_header()
3125 const struct net_device *dev = skb->dev; in dev_parse_header_protocol()
3127 if (!dev->header_ops || !dev->header_ops->parse_protocol) in dev_parse_header_protocol()
3129 return dev->header_ops->parse_protocol(skb); in dev_parse_header_protocol()
3136 if (likely(len >= dev->hard_header_len)) in dev_validate_header()
3138 if (len < dev->min_header_len) in dev_validate_header()
3142 memset(ll_header + len, 0, dev->hard_header_len - len); in dev_validate_header()
3146 if (dev->header_ops && dev->header_ops->validate) in dev_validate_header()
3147 return dev->header_ops->validate(ll_header, len); in dev_validate_header()
3154 return dev->header_ops && dev->header_ops->create; in dev_has_header()
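A sketch of building a link-layer header with the helpers above, roughly what a raw-socket or tunnel path does. foo_ll_push() and the daddr argument are assumptions.

/* Hedged sketch: push a hard header via dev->header_ops, if the device has one. */
#include <linux/netdevice.h>

static int foo_ll_push(struct sk_buff *skb, struct net_device *dev,
		       const void *daddr)
{
	if (!dev_has_header(dev))
		return 0;	/* device builds no hard header */

	/* Calls dev->header_ops->create(); returns header length or < 0. */
	return dev_hard_header(skb, dev, ntohs(skb->protocol),
			       daddr, dev->dev_addr, skb->len);
}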
3179 * Incoming packets are placed on per-CPU queues
3227 sd->input_queue_head++; in input_queue_head_incr()
3235 *qtail = ++sd->input_queue_tail; in input_queue_tail_incr_save()
3270 for (i = 0; i < dev->num_tx_queues; i++) in netif_tx_schedule_all()
3276 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_start_queue()
3280 * netif_start_queue - allow transmit
3294 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_start_all_queues()
3303 * netif_wake_queue - restart transmit
3318 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_wake_all_queues()
3326 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_stop_queue()
3330 * netif_stop_queue - stop transmitting packets
3345 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); in netif_tx_queue_stopped()
3349 * netif_queue_stopped - test if transmit queue is flow blocked
3352 * Test if transmit queue on device is currently unable to send.
3361 return dev_queue->state & QUEUE_STATE_ANY_XOFF; in netif_xmit_stopped()
3367 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; in netif_xmit_frozen_or_stopped()
3373 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; in netif_xmit_frozen_or_drv_stopped()
3377 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3378 * @dev_queue: pointer to transmit queue
3386 prefetchw(&dev_queue->dql.num_queued); in netdev_txq_bql_enqueue_prefetchw()
3391 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3392 * @dev_queue: pointer to transmit queue
3400 prefetchw(&dev_queue->dql.limit); in netdev_txq_bql_complete_prefetchw()
3408 dql_queued(&dev_queue->dql, bytes); in netdev_tx_sent_queue()
3410 if (likely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3413 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3423 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) in netdev_tx_sent_queue()
3424 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); in netdev_tx_sent_queue()
3440 dql_queued(&dev_queue->dql, bytes); in __netdev_tx_sent_queue()
3449 * netdev_sent_queue - report the number of bytes queued to hardware
3451 * @bytes: number of bytes queued to the hardware device queue
3454 * device hardware queue. @bytes should be a good approximation and should
3477 dql_completed(&dev_queue->dql, bytes); in netdev_tx_completed_queue()
3481 * netdev_tx_sent_queue will miss the update and cause the queue to in netdev_tx_completed_queue()
3486 if (unlikely(dql_avail(&dev_queue->dql) < 0)) in netdev_tx_completed_queue()
3489 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) in netdev_tx_completed_queue()
3495 * netdev_completed_queue - report bytes and packets completed by device
3501 * hardware queue over the physical medium, @bytes must exactly match the
3513 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); in netdev_tx_reset_queue()
3514 dql_reset(&q->dql); in netdev_tx_reset_queue()
3519 * netdev_reset_queue - reset the packets and bytes count of a network device
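A hedged sketch of byte queue limits accounting around a hypothetical TX ring, pairing netdev_tx_sent_queue() with netdev_tx_completed_queue() as documented above. foo_post_to_ring(), foo_ring_full() and foo_ring_has_room() stand in for hardware specifics.

/* Hedged sketch: BQL accounting in xmit and TX-completion paths. */
static netdev_tx_t foo_ring_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	unsigned int len = skb->len;

	foo_post_to_ring(dev, skb);		/* assumed hardware enqueue */
	netdev_tx_sent_queue(txq, len);		/* may set STACK_XOFF (BQL) */

	if (foo_ring_full(dev))			/* assumed ring-space check */
		netif_tx_stop_queue(txq);
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev, unsigned int pkts,
			    unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* @bytes must match what was reported via netdev_tx_sent_queue(). */
	netdev_tx_completed_queue(txq, pkts, bytes);
	if (netif_tx_queue_stopped(txq) && foo_ring_has_room(dev))
		netif_tx_wake_queue(txq);
}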
3531 * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3533 * @queue_index: given tx queue index
3535 * Returns 0 if given tx queue index >= number of device tx queues,
3536 * otherwise returns the originally passed tx queue index.
3540 if (unlikely(queue_index >= dev->real_num_tx_queues)) { in netdev_cap_txqueue()
3541 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", in netdev_cap_txqueue()
3542 dev->name, queue_index, in netdev_cap_txqueue()
3543 dev->real_num_tx_queues); in netdev_cap_txqueue()
3551 * netif_running - test if up
3558 return test_bit(__LINK_STATE_START, &dev->state); in netif_running()
3569 * netif_start_subqueue - allow sending packets on subqueue
3571 * @queue_index: sub queue index
3573 * Start individual transmit queue of a device with multiple transmit queues.
3583 * netif_stop_subqueue - stop sending packets on subqueue
3585 * @queue_index: sub queue index
3587 * Stop individual transmit queue of a device with multiple transmit queues.
3596 * netif_subqueue_stopped - test status of subqueue
3598 * @queue_index: sub queue index
3600 * Check individual transmit queue of a device with multiple transmit queues.
3617 * netif_wake_subqueue - allow sending packets on subqueue
3619 * @queue_index: sub queue index
3621 * Resume individual transmit queue of a device with multiple transmit queues.
3637 * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3638 * @j: CPU/Rx queue index
3642 * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3653 * netif_attr_test_online - Test for online CPU/Rx queue
3654 * @j: CPU/Rx queue index
3658 * Returns true if a CPU/Rx queue is online.
3673 * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3674 * @n: CPU/Rx queue index
3675 * @srcp: the cpumask/Rx queue mask pointer
3683 /* -1 is a legal arg here. */ in netif_attrmask_next()
3684 if (n != -1) in netif_attrmask_next()
3694 * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3695 * @n: CPU/Rx queue index
3706 /* -1 is a legal arg here. */ in netif_attrmask_next_and()
3707 if (n != -1) in netif_attrmask_next_and()
3736 * netif_is_multiqueue - test if device has multiple transmit queues
3743 return dev->num_tx_queues > 1; in netif_is_multiqueue()
3754 dev->real_num_rx_queues = rxqs; in netif_set_real_num_rx_queues()
3762 return dev->_rx + rxq; in __netif_get_rx_queue()
3767 struct netdev_rx_queue *queue) in get_netdev_rx_queue_index() argument
3769 struct net_device *dev = queue->dev; in get_netdev_rx_queue_index()
3770 int index = queue - dev->_rx; in get_netdev_rx_queue_index()
3772 BUG_ON(index >= dev->num_rx_queues); in get_netdev_rx_queue_index()
3844 kfree_skb(napi->skb); in napi_free_frags()
3845 napi->skb = NULL; in napi_free_frags()
3916 atomic_long_inc(&dev->rx_dropped); in ____dev_forward_skb()
3922 skb->priority = 0; in ____dev_forward_skb()
3936 * dev_put - release reference to device
3943 this_cpu_dec(*dev->pcpu_refcnt); in dev_put()
3947 * dev_hold - get reference to device
3954 this_cpu_inc(*dev->pcpu_refcnt); in dev_hold()
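A minimal sketch of the refcounting pattern these two helpers exist for: pin the device before leaving the lookup context, drop it when done. foo_defer_work() is hypothetical.

/* Hedged sketch: hold a device reference across deferred work. */
static void foo_defer_work(struct net_device *dev)
{
	dev_hold(dev);		/* pin the refcount before leaving RCU/RTNL */
	/* ... perform or schedule work that uses dev and may sleep ... */
	dev_put(dev);		/* drop the reference when done */
}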
3971 * netif_carrier_ok - test if carrier present
3978 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); in netif_carrier_ok()
3990 * netif_dormant_on - mark device as dormant.
3997 * in a "pending" state, waiting for some external event. For "on-
4003 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_on()
4008 * netif_dormant_off - set device as not dormant.
4015 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) in netif_dormant_off()
4020 * netif_dormant - test if device is dormant
4027 return test_bit(__LINK_STATE_DORMANT, &dev->state); in netif_dormant()
4032 * netif_testing_on - mark device as under test.
4043 if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_on()
4048 * netif_testing_off - set device as not under test.
4055 if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) in netif_testing_off()
4060 * netif_testing - test if device is under test
4067 return test_bit(__LINK_STATE_TESTING, &dev->state); in netif_testing()
4072 * netif_oper_up - test if device is operational
4079 return (dev->operstate == IF_OPER_UP || in netif_oper_up()
4080 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); in netif_oper_up()
4084 * netif_device_present - is device available or removed
4091 return test_bit(__LINK_STATE_PRESENT, &dev->state); in netif_device_present()
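A sketch of a link-state update driven by a hypothetical PHY poll, using the carrier helpers whose test sides are defined above. foo_link_change() is an assumption.

/* Hedged sketch: toggle carrier from a driver's link-change handler. */
static void foo_link_change(struct net_device *dev, bool link_up)
{
	if (link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);	/* operstate moves towards IF_OPER_UP */
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);	/* stack stops using the device for TX */
	}
}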
4146 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4147 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4148 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4149 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4150 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4151 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4152 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4153 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4154 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4155 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4156 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4157 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4158 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4159 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4160 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4170 return (1U << debug_value) - 1; in netif_msg_init()
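A sketch of the conventional msg_enable setup these macros test, assuming foo_priv carries an msg_enable field and a backpointer to its net_device. The module parameter name "debug" is a common convention, not mandated here.

/* Hedged sketch: derive msg_enable from a module parameter with netif_msg_init(). */
#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;			/* -1 means "use the default bitmap" */
module_param(debug, int, 0644);

static void foo_init_msg_level(struct foo_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_PROBE |
						 NETIF_MSG_LINK);
	if (netif_msg_probe(priv))
		netdev_info(priv->dev, "message level 0x%08x\n", priv->msg_enable);
}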
4175 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
4176 txq->xmit_lock_owner = cpu; in __netif_tx_lock()
4181 __acquire(&txq->_xmit_lock); in __netif_tx_acquire()
4187 __release(&txq->_xmit_lock); in __netif_tx_release()
4192 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
4193 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_lock_bh()
4198 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
4200 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_trylock()
4206 txq->xmit_lock_owner = -1; in __netif_tx_unlock()
4207 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
4212 txq->xmit_lock_owner = -1; in __netif_tx_unlock_bh()
4213 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
4218 if (txq->xmit_lock_owner != -1) in txq_trans_update()
4219 txq->trans_start = jiffies; in txq_trans_update()
4222 /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4227 if (txq->trans_start != jiffies) in netif_trans_update()
4228 txq->trans_start = jiffies; in netif_trans_update()
4232 * netif_tx_lock - grab network device transmit lock
4242 spin_lock(&dev->tx_global_lock); in netif_tx_lock()
4244 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_lock()
4250 * the ->hard_start_xmit() handler and already in netif_tx_lock()
4254 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_lock()
4269 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_unlock()
4273 * queue is not stopped for another reason, we in netif_tx_unlock()
4276 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_unlock()
4279 spin_unlock(&dev->tx_global_lock); in netif_tx_unlock()
4289 if ((dev->features & NETIF_F_LLTX) == 0) { \
4297 (((dev->features & NETIF_F_LLTX) == 0) ? \
4302 if ((dev->features & NETIF_F_LLTX) == 0) { \
4316 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_disable()
4331 nest_level = dev->nested_level; in netif_addr_lock()
4333 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock()
4341 nest_level = dev->nested_level; in netif_addr_lock_bh()
4344 spin_lock_nested(&dev->addr_list_lock, nest_level); in netif_addr_lock_bh()
4349 spin_unlock(&dev->addr_list_lock); in netif_addr_unlock()
4354 spin_unlock_bh(&dev->addr_list_lock); in netif_addr_unlock_bh()
4362 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4368 /* Support for loadable net-drivers */
4430 * __dev_uc_sync - Synchronize device's unicast list
4444 return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); in __dev_uc_sync()
4448 * __dev_uc_unsync - Remove synchronized addresses from device
4458 __hw_addr_unsync_dev(&dev->uc, dev, unsync); in __dev_uc_unsync()
4474 * __dev_mc_sync - Synchronize device's multicast list
4488 return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); in __dev_mc_sync()
4492 * __dev_mc_unsync - Remove synchronized addresses from device
4502 __hw_addr_unsync_dev(&dev->mc, dev, unsync); in __dev_mc_unsync()
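A sketch of an ndo_set_rx_mode implementation built on the sync helpers above. foo_add_filter()/foo_del_filter() stand in for hardware filter writes.

/* Hedged sketch: keep the hardware address filter in step with uc/mc lists. */
static int foo_sync(struct net_device *dev, const unsigned char *addr)
{
	return foo_add_filter(dev, addr);	/* assumed hardware helper */
}

static int foo_unsync(struct net_device *dev, const unsigned char *addr)
{
	return foo_del_filter(dev, addr);	/* assumed hardware helper */
}

static void foo_set_rx_mode(struct net_device *dev)
{
	/* Called with dev->addr_list_lock held, as noted in the header. */
	__dev_uc_sync(dev, foo_sync, foo_unsync);
	__dev_mc_sync(dev, foo_sync, foo_unsync);
}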
4558 if (list_empty(&dev->unlink_list)) in net_unlink_todo()
4559 list_add_tail(&dev->unlink_list, &net_unlink_list); in net_unlink_todo()
4565 for (iter = &(dev)->adj_list.upper, \
4586 for (iter = (dev)->adj_list.lower.next, \
4592 for (iter = &(dev)->adj_list.lower, \
4601 for (iter = (dev)->adj_list.lower.next, \
4735 return ops->ndo_start_xmit(skb, dev); in __netdev_start_xmit()
4746 const struct net_device_ops *ops = dev->netdev_ops; in netdev_start_xmit()
4783 return (dev->features & ~dev->hw_features) | dev->wanted_features; in netdev_get_wanted_features()
4840 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && in skb_gso_ok()
4848 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && in netif_needs_gso()
4849 (skb->ip_summed != CHECKSUM_UNNECESSARY))); in netif_needs_gso()
4855 dev->gso_max_size = size; in netif_set_gso_max_size()
4862 skb->protocol = protocol; in skb_gso_error_unwind()
4863 skb->encapsulation = 1; in skb_gso_error_unwind()
4866 skb->mac_header = mac_offset; in skb_gso_error_unwind()
4867 skb->network_header = skb->mac_header + mac_len; in skb_gso_error_unwind()
4868 skb->mac_len = mac_len; in skb_gso_error_unwind()
4873 return dev->priv_flags & IFF_MACSEC; in netif_is_macsec()
4878 return dev->priv_flags & IFF_MACVLAN; in netif_is_macvlan()
4883 return dev->priv_flags & IFF_MACVLAN_PORT; in netif_is_macvlan_port()
4888 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; in netif_is_bond_master()
4893 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; in netif_is_bond_slave()
4898 return dev->priv_flags & IFF_SUPP_NOFCS; in netif_supports_nofcs()
4903 return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; in netif_has_l3_rx_handler()
4908 return dev->priv_flags & IFF_L3MDEV_MASTER; in netif_is_l3_master()
4913 return dev->priv_flags & IFF_L3MDEV_SLAVE; in netif_is_l3_slave()
4918 return dev->priv_flags & IFF_EBRIDGE; in netif_is_bridge_master()
4923 return dev->priv_flags & IFF_BRIDGE_PORT; in netif_is_bridge_port()
4928 return dev->priv_flags & IFF_OPENVSWITCH; in netif_is_ovs_master()
4933 return dev->priv_flags & IFF_OVS_DATAPATH; in netif_is_ovs_port()
4943 return dev->priv_flags & IFF_TEAM; in netif_is_team_master()
4948 return dev->priv_flags & IFF_TEAM_PORT; in netif_is_team_port()
4963 return dev->priv_flags & IFF_RXFH_CONFIGURED; in netif_is_rxfh_configured()
4968 return dev->priv_flags & IFF_FAILOVER; in netif_is_failover()
4973 return dev->priv_flags & IFF_FAILOVER_SLAVE; in netif_is_failover_slave()
4979 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); in netif_keep_dst()
4986 return dev->priv_flags & IFF_MACSEC; in netif_reduces_vlan_mtu()
4997 if (!dev->name[0] || strchr(dev->name, '%')) in netdev_name()
4999 return dev->name; in netdev_name()
5004 return dev->reg_state == NETREG_UNREGISTERING; in netdev_unregistering()
5009 switch (dev->reg_state) { in netdev_reg_state()
5018 WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); in netdev_reg_state()
5066 MODULE_ALIAS("netdev-" device)
5198 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)