Lines Matching full:napi

136 #include <trace/events/napi.h>
469 * (e.g. NAPI context).
797 struct napi_struct *napi; in napi_by_id() local
799 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) in napi_by_id()
800 if (napi->napi_id == napi_id) in napi_by_id()
801 return napi; in napi_by_id()
810 struct napi_struct *napi; in netdev_napi_by_id() local
812 napi = napi_by_id(napi_id); in netdev_napi_by_id()
813 if (!napi) in netdev_napi_by_id()
816 if (WARN_ON_ONCE(!napi->dev)) in netdev_napi_by_id()
818 if (!net_eq(net, dev_net(napi->dev))) in netdev_napi_by_id()
821 return napi; in netdev_napi_by_id()
825 * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
827 * @napi_id: ID of a NAPI of a target device
829 * Find a NAPI instance with @napi_id. Lock its device.
833 * Return: pointer to NAPI, its device with lock held, NULL if not found.
838 struct napi_struct *napi; in netdev_napi_by_id_lock() local
842 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
843 if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) { in netdev_napi_by_id_lock()
848 dev = napi->dev; in netdev_napi_by_id_lock()
857 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
858 if (napi && napi->dev != dev) in netdev_napi_by_id_lock()
859 napi = NULL; in netdev_napi_by_id_lock()
862 if (!napi) in netdev_napi_by_id_lock()
864 return napi; in netdev_napi_by_id_lock()
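
A minimal sketch of the lookup-then-lock pattern above, assuming the netdev_napi_by_id_lock()/netdev_unlock() pairing described in the kerneldoc; the wrapper function and error handling are illustrative:

static int example_use_locked_dev(struct net *net, u32 napi_id)
{
	struct napi_struct *napi;

	napi = netdev_napi_by_id_lock(net, napi_id);
	if (!napi)
		return -ENOENT;

	/* ... operate on napi->dev under the per-device lock ... */

	netdev_unlock(napi->dev);
	return 0;
}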
1034 * @napi_id: ID of the NAPI struct
1036 * Search for an interface by NAPI ID. Returns %NULL if the device
1043 struct napi_struct *napi; in dev_get_by_napi_id() local
1050 napi = napi_by_id(napi_id); in dev_get_by_napi_id()
1052 return napi ? napi->dev : NULL; in dev_get_by_napi_id()
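
A hedged usage sketch: dev_get_by_napi_id() takes no reference on the device, so the RCU read lock must be held for as long as the returned pointer is used. example_napi_id_to_ifindex() is hypothetical:

static int example_napi_id_to_ifindex(unsigned int napi_id)
{
	struct net_device *dev;
	int ifindex = 0;

	rcu_read_lock();
	dev = dev_get_by_napi_id(napi_id);
	if (dev)
		ifindex = READ_ONCE(dev->ifindex);
	rcu_read_unlock();

	return ifindex;
}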
1656 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", in napi_kthread_create()
4943 struct napi_struct *napi) in ____napi_schedule() argument
4949 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { in ____napi_schedule()
4953 * read on napi->thread. Only call in ____napi_schedule()
4956 thread = READ_ONCE(napi->thread); in ____napi_schedule()
4961 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in ____napi_schedule()
4968 DEBUG_NET_WARN_ON_ONCE(!list_empty(&napi->poll_list)); in ____napi_schedule()
4969 list_add_tail(&napi->poll_list, &sd->poll_list); in ____napi_schedule()
4970 WRITE_ONCE(napi->list_owner, smp_processor_id()); in ____napi_schedule()
5253 * - If this is our own queue, schedule NAPI on our backlog.
5370 /* Schedule NAPI for backlog device. We can use in enqueue_to_backlog()
5722 * the upper (protocol) levels to process via the backlog NAPI device. It
5725 * The network buffer is passed via the backlog NAPI device. Modern NIC
5726 * drivers should use NAPI and GRO.
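
A minimal sketch of the legacy receive path the comment describes, where a non-NAPI driver hands each buffer to the backlog NAPI device; mydrv_rx() and its arguments are hypothetical:

static void mydrv_rx(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return;
	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* queued for the backlog NAPI device */
}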
6623 static int process_backlog(struct napi_struct *napi, int quota) in process_backlog() argument
6625 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6637 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); in process_backlog()
6660 * only the current CPU owns and manipulates this napi, in process_backlog()
6666 napi->state &= NAPIF_STATE_THREADED; in process_backlog()
6700 * napi_schedule_prep - check if napi can be scheduled
6701 * @n: napi context
6703 * Test if NAPI routine is already running, and if not mark
6705 * ensure only one NAPI poll instance runs. We also make
6706 * sure there is no pending NAPI disable.
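
A sketch of the canonical interrupt-handler use of napi_schedule_prep(): the prep call marks the race winner, so device IRQs are masked exactly once before __napi_schedule(). The mydrv_* names are hypothetical:

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct mydrv_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_mask_irqs(priv);		/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}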
6756 * 1) Don't let napi dequeue from the cpu poll list in napi_complete_done()
6778 * When the NAPI instance uses a timeout and keeps postponing in napi_complete_done()
6801 * because we will call napi->poll() one more time. in napi_complete_done()
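
A sketch of the poll routine that pairs with napi_complete_done(): IRQs are re-enabled only when the call returns true, since a false return means the instance was rescheduled or IRQ deferral was armed. The mydrv_* names are hypothetical:

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work = mydrv_clean_rx(priv, budget);	/* hypothetical */

	if (work < budget && napi_complete_done(napi, work))
		mydrv_unmask_irqs(priv);		/* hypothetical */
	return work;
}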
6844 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) in __busy_poll_stop() argument
6847 gro_normal_list(&napi->gro); in __busy_poll_stop()
6848 __napi_schedule(napi); in __busy_poll_stop()
6853 gro_flush_normal(&napi->gro, HZ >= 1000); in __busy_poll_stop()
6855 clear_bit(NAPI_STATE_SCHED, &napi->state); in __busy_poll_stop()
6863 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, in busy_poll_stop() argument
6874 * Since we are about to call napi->poll() once more, we can safely in busy_poll_stop()
6880 clear_bit(NAPI_STATE_MISSED, &napi->state); in busy_poll_stop()
6881 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); in busy_poll_stop()
6887 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); in busy_poll_stop()
6888 timeout = napi_get_gro_flush_timeout(napi); in busy_poll_stop()
6889 if (napi->defer_hard_irqs_count && timeout) { in busy_poll_stop()
6890 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); in busy_poll_stop()
6898 rc = napi->poll(napi, budget); in busy_poll_stop()
6899 /* We can't gro_normal_list() here, because napi->poll() might have in busy_poll_stop()
6900 * rearmed the napi (napi_complete_done()) in which case it could in busy_poll_stop()
6903 trace_napi_poll(napi, rc, budget); in busy_poll_stop()
6906 __busy_poll_stop(napi, skip_schedule); in busy_poll_stop()
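
The deferral branch above only arms the hrtimer when both per-NAPI knobs are non-zero. A hedged userspace sketch setting the device-wide defaults through the real sysfs attributes (per-NAPI overrides go through the netdev netlink family); eth0 and the values are illustrative:

#include <stdio.h>

static void set_irq_deferral(void)
{
	FILE *f;

	f = fopen("/sys/class/net/eth0/napi_defer_hard_irqs", "w");
	if (f) { fprintf(f, "2\n"); fclose(f); }	/* defer up to 2 hard IRQs */

	f = fopen("/sys/class/net/eth0/gro_flush_timeout", "w");
	if (f) { fprintf(f, "200000\n"); fclose(f); }	/* 200 us, in ns */
}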
6916 int (*napi_poll)(struct napi_struct *napi, int budget); in __napi_busy_loop()
6919 struct napi_struct *napi; in __napi_busy_loop() local
6926 napi = napi_by_id(napi_id); in __napi_busy_loop()
6927 if (!napi) in __napi_busy_loop()
6938 unsigned long val = READ_ONCE(napi->state); in __napi_busy_loop()
6940 /* If multiple threads are competing for this napi, in __napi_busy_loop()
6941 * we avoid dirtying napi->state as much as we can. in __napi_busy_loop()
6946 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6949 if (cmpxchg(&napi->state, val, in __napi_busy_loop()
6953 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6956 have_poll_lock = netpoll_poll_lock(napi); in __napi_busy_loop()
6957 napi_poll = napi->poll; in __napi_busy_loop()
6959 work = napi_poll(napi, budget); in __napi_busy_loop()
6960 trace_napi_poll(napi, work, budget); in __napi_busy_loop()
6961 gro_normal_list(&napi->gro); in __napi_busy_loop()
6964 __NET_ADD_STATS(dev_net(napi->dev), in __napi_busy_loop()
6977 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
6990 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
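
__napi_busy_loop() is reached from the socket layer when busy polling is enabled. A hedged sketch of the per-socket opt-in using the real SO_BUSY_POLL and SO_PREFER_BUSY_POLL (Linux 5.11+) socket options; the values are illustrative:

#include <sys/socket.h>

static void enable_busy_poll(int fd)
{
	int usecs = 64;	/* spin up to 64 us before sleeping */
	int on = 1;

	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
	setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &on, sizeof(on));
}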
7021 struct napi_struct *napi; in napi_suspend_irqs() local
7024 napi = napi_by_id(napi_id); in napi_suspend_irqs()
7025 if (napi) { in napi_suspend_irqs()
7026 unsigned long timeout = napi_get_irq_suspend_timeout(napi); in napi_suspend_irqs()
7029 hrtimer_start(&napi->timer, ns_to_ktime(timeout), in napi_suspend_irqs()
7037 struct napi_struct *napi; in napi_resume_irqs() local
7040 napi = napi_by_id(napi_id); in napi_resume_irqs()
7041 if (napi) { in napi_resume_irqs()
7047 if (napi_get_irq_suspend_timeout(napi)) { in napi_resume_irqs()
7049 napi_schedule(napi); in napi_resume_irqs()
7058 static void __napi_hash_add_with_id(struct napi_struct *napi, in __napi_hash_add_with_id() argument
7061 napi->gro.cached_napi_id = napi_id; in __napi_hash_add_with_id()
7063 WRITE_ONCE(napi->napi_id, napi_id); in __napi_hash_add_with_id()
7064 hlist_add_head_rcu(&napi->napi_hash_node, in __napi_hash_add_with_id()
7065 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); in __napi_hash_add_with_id()
7068 static void napi_hash_add_with_id(struct napi_struct *napi, in napi_hash_add_with_id() argument
7075 __napi_hash_add_with_id(napi, napi_id); in napi_hash_add_with_id()
7079 static void napi_hash_add(struct napi_struct *napi) in napi_hash_add() argument
7083 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) in napi_hash_add()
7094 __napi_hash_add_with_id(napi, napi_gen_id); in napi_hash_add()
7100 * is respected before freeing memory containing @napi
7102 static void napi_hash_del(struct napi_struct *napi) in napi_hash_del() argument
7108 hlist_del_init_rcu(&napi->napi_hash_node); in napi_hash_del()
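
A condensed sketch of the contract in the warning above napi_hash_del(): the hash node is unlinked with RCU, so an RCU grace period must elapse before the memory embedding the napi is freed (in-tree this happens on the netif_napi_del()/free_netdev() path):

	napi_hash_del(napi);	/* RCU-unlink from napi_hash */
	synchronize_net();	/* wait out lockless napi_by_id() readers */
	kfree(container);	/* hypothetical structure embedding the napi */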
7115 struct napi_struct *napi; in napi_watchdog() local
7117 napi = container_of(timer, struct napi_struct, timer); in napi_watchdog()
7122 if (!napi_disable_pending(napi) && in napi_watchdog()
7123 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { in napi_watchdog()
7124 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in napi_watchdog()
7125 __napi_schedule_irqoff(napi); in napi_watchdog()
7131 static void napi_stop_kthread(struct napi_struct *napi) in napi_stop_kthread() argument
7135 /* Wait until the napi STATE_THREADED is unset. */ in napi_stop_kthread()
7137 val = READ_ONCE(napi->state); in napi_stop_kthread()
7139 /* If the napi kthread owns this napi or the napi is idle, in napi_stop_kthread()
7151 if (try_cmpxchg(&napi->state, &val, new)) in napi_stop_kthread()
7159 if (!test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) in napi_stop_kthread()
7165 kthread_stop(napi->thread); in napi_stop_kthread()
7166 napi->thread = NULL; in napi_stop_kthread()
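
A condensed sketch of the state transition the loop above performs; the real function also backs off to sleep while the kthread still owns the napi:

	unsigned long val, new;

	do {
		val = READ_ONCE(napi->state);
		new = val & ~NAPIF_STATE_THREADED;	/* drop threaded mode */
	} while (!try_cmpxchg(&napi->state, &val, new));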
7169 static void napi_set_threaded_state(struct napi_struct *napi, in napi_set_threaded_state() argument
7175 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); in napi_set_threaded_state()
7176 assign_bit(NAPI_STATE_THREADED_BUSY_POLL, &napi->state, busy_poll); in napi_set_threaded_state()
7179 int napi_set_threaded(struct napi_struct *napi, in napi_set_threaded() argument
7183 if (!napi->thread) { in napi_set_threaded()
7184 int err = napi_kthread_create(napi); in napi_set_threaded()
7191 if (napi->config) in napi_set_threaded()
7192 napi->config->threaded = threaded; in napi_set_threaded()
7194 /* Setting/unsetting threaded mode on a napi might not immediately in napi_set_threaded()
7195 * take effect if the current napi instance is actively being in napi_set_threaded()
7200 if (!threaded && napi->thread) { in napi_set_threaded()
7201 napi_stop_kthread(napi); in napi_set_threaded()
7205 napi_set_threaded_state(napi, threaded); in napi_set_threaded()
7214 struct napi_struct *napi; in netif_set_threaded() local
7220 list_for_each_entry(napi, &dev->napi_list, dev_list) { in netif_set_threaded()
7221 if (!napi->thread) { in netif_set_threaded()
7222 err = napi_kthread_create(napi); in netif_set_threaded()
7234 list_for_each_entry(napi, &dev->napi_list, dev_list) in netif_set_threaded()
7235 WARN_ON_ONCE(napi_set_threaded(napi, threaded)); in netif_set_threaded()
7248 * Enable threaded mode for the NAPI instances of the device. This may be useful
7249 * for devices where multiple NAPI instances get scheduled by a single
7250 * interrupt. Threaded NAPI allows moving the NAPI processing to cores other
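
A hedged userspace sketch of toggling this per device through the real /sys/class/net/<dev>/threaded attribute; writing "1" enables kthread polling for all of the device's NAPI instances:

#include <stdio.h>

static void enable_threaded_napi(const char *ifname)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/threaded", ifname);
	f = fopen(path, "w");
	if (f) { fprintf(f, "1\n"); fclose(f); }
}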
7262 * netif_queue_set_napi - Associate queue with the napi
7263 * @dev: device to which NAPI and queue belong
7266 * @napi: NAPI context, pass NULL to clear previously set NAPI
7268 * Associate the queue with its corresponding napi context. This should be done after in netif_queue_set_napi()
7269 * registering the NAPI handler for the queue-vector and the queues have been
7273 enum netdev_queue_type type, struct napi_struct *napi) in netif_queue_set_napi() argument
7278 if (WARN_ON_ONCE(napi && !napi->dev)) in netif_queue_set_napi()
7285 rxq->napi = napi; in netif_queue_set_napi()
7289 txq->napi = napi; in netif_queue_set_napi()
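
A sketch of typical driver usage: after IRQs are requested and NAPI instances registered, associate each queue with its napi so the mapping is reported via the netdev netlink family. priv->rxq[]/txq[] are hypothetical:

	for (i = 0; i < priv->num_queues; i++) {
		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX,
				     &priv->rxq[i].napi);
		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_TX,
				     &priv->txq[i].napi);
	}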
7301 struct napi_struct *napi = in netif_napi_irq_notify() local
7304 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_irq_notify()
7308 if (napi->config && napi->dev->irq_affinity_auto) in netif_napi_irq_notify()
7309 cpumask_copy(&napi->config->affinity_mask, mask); in netif_napi_irq_notify()
7312 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_irq_notify()
7313 err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); in netif_napi_irq_notify()
7315 netdev_warn(napi->dev, "RMAP update failed (%d)\n", in netif_napi_irq_notify()
7324 struct napi_struct *napi = in netif_napi_affinity_release() local
7326 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_affinity_release()
7328 netdev_assert_locked(napi->dev); in netif_napi_affinity_release()
7330 &napi->state)); in netif_napi_affinity_release()
7332 if (!napi->dev->rx_cpu_rmap_auto) in netif_napi_affinity_release()
7334 rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_affinity_release()
7335 napi->napi_rmap_idx = -1; in netif_napi_affinity_release()
7397 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) in netif_napi_set_irq_locked() argument
7401 netdev_assert_locked_or_invisible(napi->dev); in netif_napi_set_irq_locked()
7403 if (napi->irq == irq) in netif_napi_set_irq_locked()
7407 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in netif_napi_set_irq_locked()
7408 irq_set_affinity_notifier(napi->irq, NULL); in netif_napi_set_irq_locked()
7410 napi->irq = irq; in netif_napi_set_irq_locked()
7412 (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) in netif_napi_set_irq_locked()
7416 if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) in netif_napi_set_irq_locked()
7420 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7421 rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); in netif_napi_set_irq_locked()
7425 cpu_rmap_get(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7426 napi->napi_rmap_idx = rc; in netif_napi_set_irq_locked()
7431 napi->notify.notify = netif_napi_irq_notify; in netif_napi_set_irq_locked()
7432 napi->notify.release = netif_napi_affinity_release; in netif_napi_set_irq_locked()
7433 rc = irq_set_affinity_notifier(irq, &napi->notify); in netif_napi_set_irq_locked()
7435 netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", in netif_napi_set_irq_locked()
7440 set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); in netif_napi_set_irq_locked()
7445 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7446 napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_set_irq_locked()
7447 cpu_rmap_put(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7448 napi->napi_rmap_idx = -1; in netif_napi_set_irq_locked()
7451 napi->notify.notify = NULL; in netif_napi_set_irq_locked()
7452 napi->notify.release = NULL; in netif_napi_set_irq_locked()
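
A sketch of how a driver typically reaches this code: bind each napi to its IRQ right after registration and let the core wire up the affinity notifier shown above; pdev and i are hypothetical:

	netif_napi_add(dev, &priv->rxq[i].napi, mydrv_poll);
	netif_napi_set_irq(&priv->rxq[i].napi, pci_irq_vector(pdev, i));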
7466 /* A NAPI ID might be stored in the config; if so, use it. If not, use in napi_restore_config()
7487 /* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
7491 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) in netif_napi_dev_list_add() argument
7498 if (napi->config && napi->config->napi_id) in netif_napi_dev_list_add()
7499 new_id = napi->config->napi_id; in netif_napi_dev_list_add()
7514 list_add_rcu(&napi->dev_list, higher); /* adds after higher */ in netif_napi_dev_list_add()
7523 static void napi_get_frags_check(struct napi_struct *napi) in napi_get_frags_check() argument
7528 skb = napi_get_frags(napi); in napi_get_frags_check()
7530 napi_free_frags(napi); in napi_get_frags_check()
7535 struct napi_struct *napi, in netif_napi_add_weight_locked() argument
7540 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) in netif_napi_add_weight_locked()
7543 INIT_LIST_HEAD(&napi->poll_list); in netif_napi_add_weight_locked()
7544 INIT_HLIST_NODE(&napi->napi_hash_node); in netif_napi_add_weight_locked()
7545 hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in netif_napi_add_weight_locked()
7546 gro_init(&napi->gro); in netif_napi_add_weight_locked()
7547 napi->skb = NULL; in netif_napi_add_weight_locked()
7548 napi->poll = poll; in netif_napi_add_weight_locked()
7552 napi->weight = weight; in netif_napi_add_weight_locked()
7553 napi->dev = dev; in netif_napi_add_weight_locked()
7555 napi->poll_owner = -1; in netif_napi_add_weight_locked()
7557 napi->list_owner = -1; in netif_napi_add_weight_locked()
7558 set_bit(NAPI_STATE_SCHED, &napi->state); in netif_napi_add_weight_locked()
7559 set_bit(NAPI_STATE_NPSVC, &napi->state); in netif_napi_add_weight_locked()
7560 netif_napi_dev_list_add(dev, napi); in netif_napi_add_weight_locked()
7562 /* Default settings from sysfs are applied to all NAPIs. Any per-NAPI in netif_napi_add_weight_locked()
7565 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); in netif_napi_add_weight_locked()
7566 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); in netif_napi_add_weight_locked()
7568 napi_get_frags_check(napi); in netif_napi_add_weight_locked()
7569 /* Create kthread for this napi if dev->threaded is set. in netif_napi_add_weight_locked()
7573 if (napi_get_threaded_config(dev, napi)) in netif_napi_add_weight_locked()
7574 if (napi_kthread_create(napi)) in netif_napi_add_weight_locked()
7576 netif_napi_set_irq_locked(napi, -1); in netif_napi_add_weight_locked()
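
A sketch of the usual registration sequence built on the function above: netif_napi_add() is the wrapper that supplies the default weight (NAPI_POLL_WEIGHT, 64), and the instance starts disabled, so it must be enabled before the device can schedule it:

	netif_napi_add(dev, &priv->napi, mydrv_poll);	/* default weight */
	napi_enable(&priv->napi);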
7614 * napi_disable() - prevent NAPI from scheduling
7615 * @n: NAPI context
7617 * Stop NAPI from being scheduled on this context.
7649 * napi_enable() - enable NAPI scheduling
7650 * @n: NAPI context
7652 * Enable scheduling of a NAPI instance.
7665 void __netif_napi_del_locked(struct napi_struct *napi) in __netif_napi_del_locked() argument
7667 netdev_assert_locked(napi->dev); in __netif_napi_del_locked()
7669 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) in __netif_napi_del_locked()
7672 /* Make sure NAPI is disabled (or was never enabled). */ in __netif_napi_del_locked()
7673 WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); in __netif_napi_del_locked()
7675 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in __netif_napi_del_locked()
7676 irq_set_affinity_notifier(napi->irq, NULL); in __netif_napi_del_locked()
7678 if (napi->config) { in __netif_napi_del_locked()
7679 napi->index = -1; in __netif_napi_del_locked()
7680 napi->config = NULL; in __netif_napi_del_locked()
7683 list_del_rcu(&napi->dev_list); in __netif_napi_del_locked()
7684 napi_free_frags(napi); in __netif_napi_del_locked()
7686 gro_cleanup(&napi->gro); in __netif_napi_del_locked()
7688 if (napi->thread) { in __netif_napi_del_locked()
7689 kthread_stop(napi->thread); in __netif_napi_del_locked()
7690 napi->thread = NULL; in __netif_napi_del_locked()
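
A sketch of the matching teardown order implied by the checks above: disable first so NAPI_STATE_SCHED is held, delete, then wait out the RCU grace period before freeing the napi's memory:

	napi_disable(&priv->napi);
	netif_napi_del(&priv->napi);
	synchronize_net();	/* before freeing the structure embedding napi */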
7705 * accidentally calling ->poll() when NAPI is not scheduled. in __napi_poll()
7716 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", in __napi_poll()
7722 /* Drivers must not modify the NAPI state if they in __napi_poll()
7724 * still "owns" the NAPI instance and therefore can in __napi_poll()
7732 /* The NAPI context has more processing work, but busy-polling in __napi_poll()
7738 * that the NAPI is re-scheduled. in __napi_poll()
7752 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", in __napi_poll()
7777 pr_crit("repoll requested for device %s %ps but napi is not scheduled.\n", in napi_poll()
7787 static int napi_thread_wait(struct napi_struct *napi) in napi_thread_wait() argument
7793 * kthread owns this napi and could poll on this napi. in napi_thread_wait()
7797 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { in napi_thread_wait()
7798 WARN_ON(!list_empty(&napi->poll_list)); in napi_thread_wait()
7811 static void napi_threaded_poll_loop(struct napi_struct *napi, in napi_threaded_poll_loop() argument
7828 have = netpoll_poll_lock(napi); in napi_threaded_poll_loop()
7829 __napi_poll(napi, &repoll); in napi_threaded_poll_loop()
7846 gro_flush_normal(&napi->gro, HZ >= 1000); in napi_threaded_poll_loop()
7865 struct napi_struct *napi = data; in napi_threaded_poll() local
7871 while (!napi_thread_wait(napi)) { in napi_threaded_poll()
7872 val = READ_ONCE(napi->state); in napi_threaded_poll()
7881 assign_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state, in napi_threaded_poll()
7884 napi_threaded_poll_loop(napi, want_busy_poll ? &last_qs : NULL); in napi_threaded_poll()
12727 /* Append the NAPI poll list from the offline CPU, with one exception: in dev_cpu_dead()
12732 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, in dev_cpu_dead() local
12736 list_del_init(&napi->poll_list); in dev_cpu_dead()
12737 if (napi->poll == process_backlog) in dev_cpu_dead()
12738 napi->state &= NAPIF_STATE_THREADED; in dev_cpu_dead()
12740 ____napi_schedule(sd, napi); in dev_cpu_dead()
13188 struct napi_struct *napi = &sd->backlog; in backlog_napi_should_run() local
13190 return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in backlog_napi_should_run()
13203 struct napi_struct *napi = &sd->backlog; in backlog_napi_setup() local
13205 napi->thread = this_cpu_read(backlog_napi); in backlog_napi_setup()
13206 set_bit(NAPI_STATE_THREADED, &napi->state); in backlog_napi_setup()