Lines matching the identifier "pool" in net/core/page_pool_user.c (full-word matches), grouped by the function they appear in:
/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
 * pool->user.
 */

/* ...
 * linked to a netdev at creation time. Following page pool "visibility"
 * ...
 * to error, or (c) the entire namespace which owned this pool disappeared
 * ...
 */
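For context, the two globals the lock comment guards sit next to it in the file. A minimal sketch of their declarations, assuming the usual id-allocating xarray pattern (the XA_FLAGS_ALLOC1 flag is an assumption, consistent with pool ids being nonzero below):

    /* Sketch, not a quote: allocating xarray plus the mutex named in the
     * comment above.  XA_FLAGS_ALLOC1 makes xa_alloc*() start ids at 1.
     */
    static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
    static DEFINE_MUTEX(page_pools_lock);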
typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
                             const struct genl_info *info);
in netdev_nl_page_pool_get_do():

    struct page_pool *pool;
    ...
    pool = xa_load(&page_pools, id);
    if (!pool || hlist_unhashed(&pool->user.list) ||
        !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
    ...
    err = fill(rsp, pool, info);
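Read together, these lines outline the single-pool GET handler: look the id up in the xarray under the lock, treat unlisted pools and pools from another network namespace as nonexistent, then let the fill callback build the reply. A hedged sketch of the surrounding flow; the locking, message allocation, and error labels are reconstructions, not quoted code:

    mutex_lock(&page_pools_lock);
    pool = xa_load(&page_pools, id);
    if (!pool || hlist_unhashed(&pool->user.list) ||
        !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
            err = -ENOENT;          /* invisible pools look nonexistent */
            goto err_unlock;
    }

    rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!rsp) {
            err = -ENOMEM;
            goto err_unlock;
    }

    err = fill(rsp, pool, info);    /* pp_nl_fill_cb from the typedef */
    if (err)
            goto err_free_msg;

    mutex_unlock(&page_pools_lock);
    return genlmsg_reply(rsp, info);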
in netdev_nl_page_pool_get_dump():

    struct page_pool *pool;
    ...
    hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
            if (state->pp_id && state->pp_id < pool->user.id)
                    continue;

            state->pp_id = pool->user.id;
            err = fill(skb, pool, info);
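The dump side walks each netdev's pool list; because page_pool_list() below adds pools at the head, ids descend along the list, and the saved state->pp_id lets an interrupted dump resume without repeating entries. A sketch of the enclosing loop; the outer iterator and the per-netdev reset are assumptions consistent with how netlink dumps resume:

    for_each_netdev_dump(net, netdev, state->ifindex) {
            hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
                    /* skip ids newer than the last one already sent */
                    if (state->pp_id && state->pp_id < pool->user.id)
                            continue;

                    state->pp_id = pool->user.id;
                    err = fill(skb, pool, info);
                    if (err)
                            goto out;       /* skb full: resume here */
            }
            state->pp_id = 0;       /* next netdev dumps from the top */
    }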
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
                        const struct genl_info *info)

    ...
    if (!page_pool_get_stats(pool, &stats))
    ...
    if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
        (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
         nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
                     pool->slow.netdev->ifindex)))
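page_pool_get_stats() is the same accumulator drivers use for ethtool reporting (the whole stats fill path is compiled only with CONFIG_PAGE_POOL_STATS): it folds the pool's allocation counters and per-CPU recycle counters into the caller's struct page_pool_stats and returns true on success, which is why a false return here just skips the message. A hedged driver-side usage sketch; the my_drv_* name is hypothetical:

    #include <net/page_pool/helpers.h>

    /* Hypothetical helper: accumulate stats from each of a driver's
     * per-ring pools into one total.
     */
    static void my_drv_sum_pool_stats(struct page_pool **pools, int n,
                                      struct page_pool_stats *total)
    {
            int i;

            for (i = 0; i < n; i++)
                    page_pool_get_stats(pools[i], total);   /* accumulates */
    }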
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
                  const struct genl_info *info)

    ...
    if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
    ...
    if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
        nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
                    pool->slow.netdev->ifindex))
    ...
    napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
    ...
    inflight = page_pool_inflight(pool, false);
    refsz = PAGE_SIZE << pool->p.order;
    ...
    if (pool->user.detach_time &&
        nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
                     pool->user.detach_time))
    ...
    if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL))
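The in-flight accounting deserves a worked example: page_pool_inflight() reports buffers handed out but not yet returned, refsz is the size of one buffer, and in-flight memory is their product. A sketch of how these plausibly feed the NETDEV_A_PAGE_POOL_INFLIGHT* attributes:

    /* e.g. an order-0 pool on 4 KiB pages with 300 buffers in flight:
     *   refsz = 4096 << 0 = 4096
     *   in-flight memory = 300 * 4096 = 1228800 bytes
     */
    if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
        nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
                     inflight * refsz))
            goto err_cancel;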
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)

    ...
    if (hlist_unhashed(&pool->user.list))
            return;
    net = dev_net(pool->slow.netdev);
    ...
    if (page_pool_nl_fill(ntf, pool, &info)) {
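After the fill succeeds, the notification presumably goes out as a generic-netlink multicast scoped to the pool's namespace. A hedged sketch of that delivery step; the listener check and the group constant follow the netdev genl family's conventions but are assumptions here:

    if (!genl_has_listeners(&netdev_nl_family, net,
                            NETDEV_NLGRP_PAGE_POOL))
            return;         /* nobody subscribed, skip the allocation */

    ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!ntf)
            return;

    if (page_pool_nl_fill(ntf, pool, &info)) {
            nlmsg_free(ntf);
            return;
    }

    genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
                            0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);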
int page_pool_list(struct page_pool *pool)

    ...
    err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, ...);
    ...
    INIT_HLIST_NODE(&pool->user.list);
    if (pool->slow.netdev) {
            hlist_add_head(&pool->user.list,
                           &pool->slow.netdev->page_pools);
            netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
    }
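The elided tail of the xa_alloc_cyclic() call is a cycling cursor plus allocation flags. Cyclic allocation matters for user space: ids keep increasing until the 32-bit space wraps, so a just-destroyed pool's id is not immediately handed to a new pool and stale ids keep failing lookups instead of aliasing. A sketch of the pattern; the cursor name is hypothetical:

    static u32 id_next;     /* hypothetical cycling cursor */
    int err;

    /* xa_alloc_cyclic() returns 0 for a fresh id, 1 if the cursor
     * wrapped, and -EBUSY once every 32-bit id is in use.
     */
    err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool,
                          xa_limit_32b, &id_next, GFP_KERNEL);
    if (err < 0)
            return err;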
void page_pool_detached(struct page_pool *pool)

    ...
    pool->user.detach_time = ktime_get_boottime_seconds();
    netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
void page_pool_unlist(struct page_pool *pool)

    ...
    netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
    xa_erase(&page_pools, pool->user.id);
    if (!hlist_unhashed(&pool->user.list))
            hlist_del(&pool->user.list);
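Note the ordering (notify first, while the pool is still findable, then erase) and the hlist_unhashed() guard: pools created without a netdev only ever get INIT_HLIST_NODE() in page_pool_list() above and are never added to any list, and deleting such an unhashed node would dereference its NULL pprev pointer. A generic illustration of the pattern, not a quote:

    struct hlist_node node;

    INIT_HLIST_NODE(&node);         /* node.pprev == NULL: "unhashed" */

    /* hlist_del() on an unhashed node would crash on node.pprev,
     * so teardown paths must guard it:
     */
    if (!hlist_unhashed(&node))
            hlist_del(&node);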
in page_pool_check_memory_provider():

    struct page_pool *pool;
    ...
    hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
            if (pool->mp_priv != binding)
                    continue;

            if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
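These fragments read as a validation helper for memory providers (e.g. a devmem binding): scan the device's pools for one that both uses the given binding and sits on the queue being checked. A hedged reconstruction; the binding lookup, the locking, and the return codes are assumptions:

    void *binding = rxq->mp_params.mp_priv;         /* assumed source */
    struct hlist_node *n;

    if (!binding)
            return 0;

    mutex_lock(&page_pools_lock);
    hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
            if (pool->mp_priv != binding)
                    continue;

            if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
                    mutex_unlock(&page_pools_lock);
                    return 0;       /* provider active on this queue */
            }
    }
    mutex_unlock(&page_pools_lock);
    return -ENODATA;        /* assumed: no pool uses this binding here */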
in page_pool_unreg_netdev_wipe():

    struct page_pool *pool;
    ...
    hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
            hlist_del_init(&pool->user.list);
            pool->slow.netdev = NET_PTR_POISON;
    }
in page_pool_unreg_netdev():

    struct page_pool *pool, *last;
    ...
    hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
            pool->slow.netdev = lo;
            netdev_nl_page_pool_event(pool,
                                      NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
            last = pool;
    }
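Pools that still have pages in flight cannot be freed along with their netdev, so on unregistration they are re-homed: each pool's netdev pointer is flipped to the namespace's loopback device, a CHANGE notification announces the move, and the whole list is spliced onto loopback's page_pools in one operation. A hedged sketch of the steps around the matched loop; 'lo' and the splice are reconstructions:

    lo = dev_net(netdev)->loopback_dev;

    mutex_lock(&page_pools_lock);
    last = NULL;
    hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
            pool->slow.netdev = lo;
            netdev_nl_page_pool_event(pool,
                                      NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
            last = pool;
    }
    if (last)       /* splice [first, last] onto loopback's list */
            hlist_splice_init(&netdev->page_pools, &last->user.list,
                              &lo->page_pools);
    mutex_unlock(&page_pools_lock);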