Lines matching full:napi (each hit shows the kernel source line number, the matched text, and the enclosing function)
136 #include <trace/events/napi.h>
463 * (e.g. NAPI context).
776 struct napi_struct *napi; in napi_by_id() local
778 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node) in napi_by_id()
779 if (napi->napi_id == napi_id) in napi_by_id()
780 return napi; in napi_by_id()
789 struct napi_struct *napi; in netdev_napi_by_id() local
791 napi = napi_by_id(napi_id); in netdev_napi_by_id()
792 if (!napi) in netdev_napi_by_id()
795 if (WARN_ON_ONCE(!napi->dev)) in netdev_napi_by_id()
797 if (!net_eq(net, dev_net(napi->dev))) in netdev_napi_by_id()
800 return napi; in netdev_napi_by_id()
804 * netdev_napi_by_id_lock() - find a device by NAPI ID and lock it
806 * @napi_id: ID of a NAPI of a target device
808 * Find a NAPI instance with @napi_id. Lock its device.
812 * Return: pointer to NAPI, its device with lock held, NULL if not found.
817 struct napi_struct *napi; in netdev_napi_by_id_lock() local
821 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
822 if (!napi || READ_ONCE(napi->dev->reg_state) != NETREG_REGISTERED) { in netdev_napi_by_id_lock()
827 dev = napi->dev; in netdev_napi_by_id_lock()
836 napi = netdev_napi_by_id(net, napi_id); in netdev_napi_by_id_lock()
837 if (napi && napi->dev != dev) in netdev_napi_by_id_lock()
838 napi = NULL; in netdev_napi_by_id_lock()
841 if (!napi) in netdev_napi_by_id_lock()
843 return napi; in netdev_napi_by_id_lock()
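
The hits above cover the NAPI ID lookup helpers: napi_by_id() walks the RCU hash bucket, netdev_napi_by_id() adds the network-namespace check, and netdev_napi_by_id_lock() additionally locks the owning device and re-validates the NAPI once the lock is held (lines 836-838). A minimal sketch of a caller honoring that contract follows; it is illustrative only, example_use_napi_id() is invented, and in practice the helper is internal to net/core (used by the netdev netlink code), not a driver-facing API.

#include <linux/netdevice.h>

/* Sketch: resolve a NAPI ID to its instance with the owning device's
 * instance lock held, as netdev_napi_by_id_lock() guarantees on success.
 */
static int example_use_napi_id(struct net *net, unsigned int napi_id)
{
	struct napi_struct *napi;

	napi = netdev_napi_by_id_lock(net, napi_id);
	if (!napi)
		return -ENOENT;		/* not found in this netns */

	/* napi->dev is guaranteed non-NULL and locked here */
	pr_info("%s: napi id %u\n", netdev_name(napi->dev), napi->napi_id);

	netdev_unlock(napi->dev);
	return 0;
}
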
1013 * @napi_id: ID of the NAPI struct
1015 * Search for an interface by NAPI ID. Returns %NULL if the device
1022 struct napi_struct *napi; in dev_get_by_napi_id() local
1029 napi = napi_by_id(napi_id); in dev_get_by_napi_id()
1031 return napi ? napi->dev : NULL; in dev_get_by_napi_id()
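
dev_get_by_napi_id() above resolves a NAPI ID straight to its net_device; it performs the hash lookup under RCU and returns the pointer without taking a reference, so the caller must remain inside an RCU read-side section while using the result. A hedged sketch (the function name is invented):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int example_napi_id_to_ifindex(unsigned int napi_id)
{
	struct net_device *dev;
	int ifindex = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_napi_id(napi_id);	/* no refcount is taken */
	if (dev)
		ifindex = dev->ifindex;
	rcu_read_unlock();

	return ifindex;
}
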
1583 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", in napi_kthread_create()
4709 struct napi_struct *napi) in ____napi_schedule() argument
4715 if (test_bit(NAPI_STATE_THREADED, &napi->state)) { in ____napi_schedule()
4719 * read on napi->thread. Only call in ____napi_schedule()
4722 thread = READ_ONCE(napi->thread); in ____napi_schedule()
4727 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in ____napi_schedule()
4734 list_add_tail(&napi->poll_list, &sd->poll_list); in ____napi_schedule()
4735 WRITE_ONCE(napi->list_owner, smp_processor_id()); in ____napi_schedule()
4970 * - If this is our own queue, NAPI schedule our backlog.
5084 /* Schedule NAPI for backlog device. We can use in enqueue_to_backlog()
5432 * the upper (protocol) levels to process via the backlog NAPI device. It
5435 * The network buffer is passed via the backlog NAPI device. Modern NIC
5436 * drivers should use NAPI and GRO.
6330 static int process_backlog(struct napi_struct *napi, int quota) in process_backlog() argument
6332 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
6344 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight); in process_backlog()
6367 * only current cpu owns and manipulates this napi, in process_backlog()
6373 napi->state &= NAPIF_STATE_THREADED; in process_backlog()
6407 * napi_schedule_prep - check if napi can be scheduled
6408 * @n: napi context
6410 * Test if NAPI routine is already running, and if not mark
6412 * ensure only one NAPI poll instance runs. We also make
6413 * sure there is no pending NAPI disable.
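
The napi_schedule_prep() kerneldoc above describes the first half of the standard driver interrupt idiom: claim the instance, mask the device interrupt, then hand the rest to __napi_schedule(). A hedged sketch of that idiom follows; struct example_priv and example_mask_rx_irq() are hypothetical stand-ins for a real driver's private data and register write.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
	/* ... hardware state ... */
};

static void example_mask_rx_irq(struct example_priv *priv) { /* MMIO write */ }

static irqreturn_t example_rx_irq(int irq, void *data)
{
	struct example_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		example_mask_rx_irq(priv);	/* no more IRQs until polling is done */
		__napi_schedule(&priv->napi);	/* dispatches via ____napi_schedule() above */
	}
	return IRQ_HANDLED;
}
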
6463 * 1) Don't let napi dequeue from the cpu poll list in napi_complete_done()
6485 * When the NAPI instance uses a timeout and keeps postponing in napi_complete_done()
6509 * because we will call napi->poll() one more time. in napi_complete_done()
6551 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) in __busy_poll_stop() argument
6554 gro_normal_list(&napi->gro); in __busy_poll_stop()
6555 __napi_schedule(napi); in __busy_poll_stop()
6560 gro_flush(&napi->gro, HZ >= 1000); in __busy_poll_stop()
6561 gro_normal_list(&napi->gro); in __busy_poll_stop()
6563 clear_bit(NAPI_STATE_SCHED, &napi->state); in __busy_poll_stop()
6571 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, in busy_poll_stop() argument
6582 * Since we are about to call napi->poll() once more, we can safely in busy_poll_stop()
6588 clear_bit(NAPI_STATE_MISSED, &napi->state); in busy_poll_stop()
6589 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); in busy_poll_stop()
6595 napi->defer_hard_irqs_count = napi_get_defer_hard_irqs(napi); in busy_poll_stop()
6596 timeout = napi_get_gro_flush_timeout(napi); in busy_poll_stop()
6597 if (napi->defer_hard_irqs_count && timeout) { in busy_poll_stop()
6598 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED); in busy_poll_stop()
6606 rc = napi->poll(napi, budget); in busy_poll_stop()
6607 /* We can't gro_normal_list() here, because napi->poll() might have in busy_poll_stop()
6608 * rearmed the napi (napi_complete_done()) in which case it could in busy_poll_stop()
6611 trace_napi_poll(napi, rc, budget); in busy_poll_stop()
6614 __busy_poll_stop(napi, skip_schedule); in busy_poll_stop()
6624 int (*napi_poll)(struct napi_struct *napi, int budget); in __napi_busy_loop()
6627 struct napi_struct *napi; in __napi_busy_loop() local
6634 napi = napi_by_id(napi_id); in __napi_busy_loop()
6635 if (!napi) in __napi_busy_loop()
6646 unsigned long val = READ_ONCE(napi->state); in __napi_busy_loop()
6648 /* If multiple threads are competing for this napi, in __napi_busy_loop()
6649 * we avoid dirtying napi->state as much as we can. in __napi_busy_loop()
6654 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6657 if (cmpxchg(&napi->state, val, in __napi_busy_loop()
6661 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in __napi_busy_loop()
6664 have_poll_lock = netpoll_poll_lock(napi); in __napi_busy_loop()
6665 napi_poll = napi->poll; in __napi_busy_loop()
6667 work = napi_poll(napi, budget); in __napi_busy_loop()
6668 trace_napi_poll(napi, work, budget); in __napi_busy_loop()
6669 gro_normal_list(&napi->gro); in __napi_busy_loop()
6672 __NET_ADD_STATS(dev_net(napi->dev), in __napi_busy_loop()
6685 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
6698 busy_poll_stop(napi, have_poll_lock, flags, budget); in __napi_busy_loop()
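
The __napi_busy_loop() hits above are the kernel side of busy polling; userspace reaches this loop through socket options (or epoll parameters) rather than calling it directly. A hedged userspace sketch with illustrative values only; SO_PREFER_BUSY_POLL needs reasonably recent kernel and libc headers.

#include <sys/socket.h>

/* SO_BUSY_POLL sets the per-socket busy-poll time in microseconds;
 * SO_PREFER_BUSY_POLL corresponds to the NAPI_STATE_PREFER_BUSY_POLL
 * handling visible in the hits above.
 */
static int example_enable_busy_poll(int fd)
{
	int usecs = 50;
	int prefer = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)))
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &prefer, sizeof(prefer)))
		return -1;
	return 0;
}
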
6729 struct napi_struct *napi; in napi_suspend_irqs() local
6732 napi = napi_by_id(napi_id); in napi_suspend_irqs()
6733 if (napi) { in napi_suspend_irqs()
6734 unsigned long timeout = napi_get_irq_suspend_timeout(napi); in napi_suspend_irqs()
6737 hrtimer_start(&napi->timer, ns_to_ktime(timeout), in napi_suspend_irqs()
6745 struct napi_struct *napi; in napi_resume_irqs() local
6748 napi = napi_by_id(napi_id); in napi_resume_irqs()
6749 if (napi) { in napi_resume_irqs()
6755 if (napi_get_irq_suspend_timeout(napi)) { in napi_resume_irqs()
6757 napi_schedule(napi); in napi_resume_irqs()
6766 static void __napi_hash_add_with_id(struct napi_struct *napi, in __napi_hash_add_with_id() argument
6769 napi->gro.cached_napi_id = napi_id; in __napi_hash_add_with_id()
6771 WRITE_ONCE(napi->napi_id, napi_id); in __napi_hash_add_with_id()
6772 hlist_add_head_rcu(&napi->napi_hash_node, in __napi_hash_add_with_id()
6773 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); in __napi_hash_add_with_id()
6776 static void napi_hash_add_with_id(struct napi_struct *napi, in napi_hash_add_with_id() argument
6783 __napi_hash_add_with_id(napi, napi_id); in napi_hash_add_with_id()
6787 static void napi_hash_add(struct napi_struct *napi) in napi_hash_add() argument
6791 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state)) in napi_hash_add()
6802 __napi_hash_add_with_id(napi, napi_gen_id); in napi_hash_add()
6808 * is respected before freeing memory containing @napi
6810 static void napi_hash_del(struct napi_struct *napi) in napi_hash_del() argument
6816 hlist_del_init_rcu(&napi->napi_hash_node); in napi_hash_del()
6823 struct napi_struct *napi; in napi_watchdog() local
6825 napi = container_of(timer, struct napi_struct, timer); in napi_watchdog()
6830 if (!napi_disable_pending(napi) && in napi_watchdog()
6831 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) { in napi_watchdog()
6832 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state); in napi_watchdog()
6833 __napi_schedule_irqoff(napi); in napi_watchdog()
6841 struct napi_struct *napi; in dev_set_threaded() local
6850 list_for_each_entry(napi, &dev->napi_list, dev_list) { in dev_set_threaded()
6851 if (!napi->thread) { in dev_set_threaded()
6852 err = napi_kthread_create(napi); in dev_set_threaded()
6868 /* Setting/unsetting threaded mode on a napi might not immediately in dev_set_threaded()
6869 * take effect, if the current napi instance is actively being in dev_set_threaded()
6874 list_for_each_entry(napi, &dev->napi_list, dev_list) in dev_set_threaded()
6875 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); in dev_set_threaded()
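
dev_set_threaded() above creates a kthread per NAPI on demand and then flips NAPI_STATE_THREADED on every instance of the device; the same switch is exposed to userspace through /sys/class/net/<dev>/threaded. A hedged in-kernel sketch of a driver opting in (the function name is invented and the caller's locking requirements are glossed over here):

#include <linux/netdevice.h>

static void example_enable_threaded_napi(struct net_device *dev)
{
	int err;

	err = dev_set_threaded(dev, true);
	if (err)
		netdev_warn(dev, "threaded NAPI not enabled: %d\n", err);
}
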
6882 * netif_queue_set_napi - Associate queue with the napi
6883 * @dev: device to which NAPI and queue belong
6886 * @napi: NAPI context, pass NULL to clear previously set NAPI
6888 * Set queue with its corresponding napi context. This should be done after
6889 * registering the NAPI handler for the queue-vector and the queues have been
6893 enum netdev_queue_type type, struct napi_struct *napi) in netif_queue_set_napi() argument
6898 if (WARN_ON_ONCE(napi && !napi->dev)) in netif_queue_set_napi()
6905 rxq->napi = napi; in netif_queue_set_napi()
6909 txq->napi = napi; in netif_queue_set_napi()
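
netif_queue_set_napi() above records which NAPI instance services which RX or TX queue so the mapping can be reported over the netdev netlink family. A hedged driver-side sketch with an invented per-ring structure:

#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	unsigned int qid;
};

/* Called once the NAPI for this queue/vector is registered and the
 * queue count is final, per the kerneldoc above.
 */
static void example_link_rx_queue(struct net_device *dev,
				  struct example_ring *ring)
{
	netif_queue_set_napi(dev, ring->qid, NETDEV_QUEUE_TYPE_RX,
			     &ring->napi);
}
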
6921 struct napi_struct *napi = in netif_napi_irq_notify() local
6924 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_irq_notify()
6928 if (napi->config && napi->dev->irq_affinity_auto) in netif_napi_irq_notify()
6929 cpumask_copy(&napi->config->affinity_mask, mask); in netif_napi_irq_notify()
6932 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_irq_notify()
6933 err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); in netif_napi_irq_notify()
6935 netdev_warn(napi->dev, "RMAP update failed (%d)\n", in netif_napi_irq_notify()
6944 struct napi_struct *napi = in netif_napi_affinity_release() local
6946 struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; in netif_napi_affinity_release()
6948 netdev_assert_locked(napi->dev); in netif_napi_affinity_release()
6950 &napi->state)); in netif_napi_affinity_release()
6952 if (!napi->dev->rx_cpu_rmap_auto) in netif_napi_affinity_release()
6954 rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_affinity_release()
6955 napi->napi_rmap_idx = -1; in netif_napi_affinity_release()
7017 void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) in netif_napi_set_irq_locked() argument
7021 netdev_assert_locked_or_invisible(napi->dev); in netif_napi_set_irq_locked()
7023 if (napi->irq == irq) in netif_napi_set_irq_locked()
7027 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in netif_napi_set_irq_locked()
7028 irq_set_affinity_notifier(napi->irq, NULL); in netif_napi_set_irq_locked()
7030 napi->irq = irq; in netif_napi_set_irq_locked()
7032 (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) in netif_napi_set_irq_locked()
7036 if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) in netif_napi_set_irq_locked()
7040 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7041 rc = cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); in netif_napi_set_irq_locked()
7045 cpu_rmap_get(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7046 napi->napi_rmap_idx = rc; in netif_napi_set_irq_locked()
7051 napi->notify.notify = netif_napi_irq_notify; in netif_napi_set_irq_locked()
7052 napi->notify.release = netif_napi_affinity_release; in netif_napi_set_irq_locked()
7053 rc = irq_set_affinity_notifier(irq, &napi->notify); in netif_napi_set_irq_locked()
7055 netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", in netif_napi_set_irq_locked()
7060 set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); in netif_napi_set_irq_locked()
7065 if (napi->dev->rx_cpu_rmap_auto) { in netif_napi_set_irq_locked()
7066 napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; in netif_napi_set_irq_locked()
7067 cpu_rmap_put(napi->dev->rx_cpu_rmap); in netif_napi_set_irq_locked()
7068 napi->napi_rmap_idx = -1; in netif_napi_set_irq_locked()
7071 napi->notify.notify = NULL; in netif_napi_set_irq_locked()
7072 napi->notify.release = NULL; in netif_napi_set_irq_locked()
7086 /* a NAPI ID might be stored in the config, if so use it. if not, use in napi_restore_config()
7105 /* Netlink wants the NAPI list to be sorted by ID, if adding a NAPI which will
7109 netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) in netif_napi_dev_list_add() argument
7116 if (napi->config && napi->config->napi_id) in netif_napi_dev_list_add()
7117 new_id = napi->config->napi_id; in netif_napi_dev_list_add()
7132 list_add_rcu(&napi->dev_list, higher); /* adds after higher */ in netif_napi_dev_list_add()
7141 static void napi_get_frags_check(struct napi_struct *napi) in napi_get_frags_check() argument
7146 skb = napi_get_frags(napi); in napi_get_frags_check()
7148 napi_free_frags(napi); in napi_get_frags_check()
7153 struct napi_struct *napi, in netif_napi_add_weight_locked() argument
7158 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state))) in netif_napi_add_weight_locked()
7161 INIT_LIST_HEAD(&napi->poll_list); in netif_napi_add_weight_locked()
7162 INIT_HLIST_NODE(&napi->napi_hash_node); in netif_napi_add_weight_locked()
7163 hrtimer_setup(&napi->timer, napi_watchdog, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); in netif_napi_add_weight_locked()
7164 gro_init(&napi->gro); in netif_napi_add_weight_locked()
7165 napi->skb = NULL; in netif_napi_add_weight_locked()
7166 napi->poll = poll; in netif_napi_add_weight_locked()
7170 napi->weight = weight; in netif_napi_add_weight_locked()
7171 napi->dev = dev; in netif_napi_add_weight_locked()
7173 napi->poll_owner = -1; in netif_napi_add_weight_locked()
7175 napi->list_owner = -1; in netif_napi_add_weight_locked()
7176 set_bit(NAPI_STATE_SCHED, &napi->state); in netif_napi_add_weight_locked()
7177 set_bit(NAPI_STATE_NPSVC, &napi->state); in netif_napi_add_weight_locked()
7178 netif_napi_dev_list_add(dev, napi); in netif_napi_add_weight_locked()
7180 /* default settings from sysfs are applied to all NAPIs. any per-NAPI in netif_napi_add_weight_locked()
7183 napi_set_defer_hard_irqs(napi, READ_ONCE(dev->napi_defer_hard_irqs)); in netif_napi_add_weight_locked()
7184 napi_set_gro_flush_timeout(napi, READ_ONCE(dev->gro_flush_timeout)); in netif_napi_add_weight_locked()
7186 napi_get_frags_check(napi); in netif_napi_add_weight_locked()
7187 /* Create kthread for this napi if dev->threaded is set. in netif_napi_add_weight_locked()
7191 if (dev->threaded && napi_kthread_create(napi)) in netif_napi_add_weight_locked()
7193 netif_napi_set_irq_locked(napi, -1); in netif_napi_add_weight_locked()
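
The netif_napi_add_weight_locked() hits show what registration initializes: poll list, hash node, watchdog hrtimer, GRO state, default weight, and optionally a kthread. From a driver, registration is normally the short wrapper form; a hedged sketch with an invented poll callback and helper name:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget);	/* see the sketch near __napi_poll() below */

static void example_register_napi(struct net_device *dev,
				  struct napi_struct *napi, int irq)
{
	netif_napi_add(dev, napi, example_poll);	/* default weight (64) */
	netif_napi_set_irq(napi, irq);			/* cf. netif_napi_set_irq_locked() above */
}
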
7229 * napi_disable() - prevent NAPI from scheduling
7230 * @n: NAPI context
7232 * Stop NAPI from being scheduled on this context.
7264 * napi_enable() - enable NAPI scheduling
7265 * @n: NAPI context
7267 * Enable scheduling of a NAPI instance.
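
napi_enable() and napi_disable() above delimit the periods when the instance may be scheduled; the usual pairing is enable in ndo_open after registration and disable in ndo_stop (or around a ring reconfiguration) before touching the rings. A hedged sketch; the priv layout and IRQ mask helpers are invented:

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

static void example_mask_rx_irq(struct example_priv *priv) { }
static void example_unmask_rx_irq(struct example_priv *priv) { }

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);	/* allow scheduling */
	example_unmask_rx_irq(priv);
	return 0;
}

static int example_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	example_mask_rx_irq(priv);
	napi_disable(&priv->napi);	/* waits for any in-flight poll */
	return 0;
}
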
7280 void __netif_napi_del_locked(struct napi_struct *napi) in __netif_napi_del_locked() argument
7282 netdev_assert_locked(napi->dev); in __netif_napi_del_locked()
7284 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) in __netif_napi_del_locked()
7287 /* Make sure NAPI is disabled (or was never enabled). */ in __netif_napi_del_locked()
7288 WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); in __netif_napi_del_locked()
7290 if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) in __netif_napi_del_locked()
7291 irq_set_affinity_notifier(napi->irq, NULL); in __netif_napi_del_locked()
7293 if (napi->config) { in __netif_napi_del_locked()
7294 napi->index = -1; in __netif_napi_del_locked()
7295 napi->config = NULL; in __netif_napi_del_locked()
7298 list_del_rcu(&napi->dev_list); in __netif_napi_del_locked()
7299 napi_free_frags(napi); in __netif_napi_del_locked()
7301 gro_cleanup(&napi->gro); in __netif_napi_del_locked()
7303 if (napi->thread) { in __netif_napi_del_locked()
7304 kthread_stop(napi->thread); in __netif_napi_del_locked()
7305 napi->thread = NULL; in __netif_napi_del_locked()
7320 * accidentally calling ->poll() when NAPI is not scheduled. in __napi_poll()
7331 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", in __napi_poll()
7337 /* Drivers must not modify the NAPI state if they in __napi_poll()
7339 * still "owns" the NAPI instance and therefore can in __napi_poll()
7347 /* The NAPI context has more processing work, but busy-polling in __napi_poll()
7353 * that the NAPI is re-scheduled. in __napi_poll()
7368 pr_warn_once("%s: Budget exhausted after napi rescheduled\n", in __napi_poll()
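
The __napi_poll() hits spell out the budget contract a poll callback must follow: a callback that consumed its whole budget must not touch the NAPI state (the core still owns the instance and will repoll it), must never return more than the budget, and only when it did less work than the budget may it call napi_complete_done() and re-enable device interrupts. A hedged sketch of such a callback; example_clean_rx() and the unmask helper are invented:

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

static int example_clean_rx(struct example_priv *priv, int budget)
{
	return 0;	/* process up to @budget packets, return the count */
}

static void example_unmask_rx_irq(struct example_priv *priv) { }

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = example_clean_rx(priv, budget);

	if (work_done < budget &&
	    napi_complete_done(napi, work_done))
		example_unmask_rx_irq(priv);	/* IRQs back on only when fully done */

	return work_done;	/* never more than budget */
}
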
7398 static int napi_thread_wait(struct napi_struct *napi) in napi_thread_wait() argument
7404 * kthread owns this napi and could poll on this napi. in napi_thread_wait()
7408 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) { in napi_thread_wait()
7409 WARN_ON(!list_empty(&napi->poll_list)); in napi_thread_wait()
7422 static void napi_threaded_poll_loop(struct napi_struct *napi) in napi_threaded_poll_loop() argument
7438 have = netpoll_poll_lock(napi); in napi_threaded_poll_loop()
7439 __napi_poll(napi, &repoll); in napi_threaded_poll_loop()
7463 struct napi_struct *napi = data; in napi_threaded_poll() local
7465 while (!napi_thread_wait(napi)) in napi_threaded_poll()
7466 napi_threaded_poll_loop(napi); in napi_threaded_poll()
12259 /* Append NAPI poll list from offline CPU, with one exception: in dev_cpu_dead()
12264 struct napi_struct *napi = list_first_entry(&oldsd->poll_list, in dev_cpu_dead() local
12268 list_del_init(&napi->poll_list); in dev_cpu_dead()
12269 if (napi->poll == process_backlog) in dev_cpu_dead()
12270 napi->state &= NAPIF_STATE_THREADED; in dev_cpu_dead()
12272 ____napi_schedule(sd, napi); in dev_cpu_dead()
12632 struct napi_struct *napi = &sd->backlog; in backlog_napi_should_run() local
12634 return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state); in backlog_napi_should_run()
12647 struct napi_struct *napi = &sd->backlog; in backlog_napi_setup() local
12649 napi->thread = this_cpu_read(backlog_napi); in backlog_napi_setup()
12650 set_bit(NAPI_STATE_THREADED, &napi->state); in backlog_napi_setup()