Lines matching full:pool in net/core/page_pool_user.c (page pool netlink code); each entry shows the source line number, the matched line, and the enclosing function.

15 /* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
21 * linked to a netdev at creation time. Following page pool "visibility"
28 * to error, or (c) the entire namespace which owned this pool disappeared
32 typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
38 struct page_pool *pool; in netdev_nl_page_pool_get_do() local
43 pool = xa_load(&page_pools, id); in netdev_nl_page_pool_get_do()
44 if (!pool || hlist_unhashed(&pool->user.list) || in netdev_nl_page_pool_get_do()
45 !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { in netdev_nl_page_pool_get_do()
56 err = fill(rsp, pool, info); in netdev_nl_page_pool_get_do()
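
The three matches above (source lines 38-56) are from the netlink "do" (single-object get) handler: the pool is looked up by its user-visible ID in the global page_pools xarray, rejected if it has been unhashed from its netdev or belongs to a different network namespace, and then handed to the pp_nl_fill_cb from the typedef at line 32. A minimal sketch of that lookup-and-fill step follows; the lock name (page_pools_lock), the -ENOENT miss error, and the later genlmsg_reply() of rsp are assumptions about the elided lines.

/* Sketch of the lookup step in netdev_nl_page_pool_get_do().
 * Assumed (not in the matched lines): the protecting mutex is called
 * page_pools_lock, a miss yields -ENOENT, and the caller later sends
 * "rsp" back with genlmsg_reply().
 */
static int pp_lookup_and_fill_sketch(struct sk_buff *rsp,
                                     struct genl_info *info, u32 id,
                                     pp_nl_fill_cb fill)
{
        struct page_pool *pool;
        int err;

        mutex_lock(&page_pools_lock);
        pool = xa_load(&page_pools, id);
        if (!pool || hlist_unhashed(&pool->user.list) ||
            !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
                /* unknown ID, pool already unlinked, or wrong netns */
                err = -ENOENT;
                goto out_unlock;
        }

        err = fill(rsp, pool, info);    /* e.g. page_pool_nl_fill() */
out_unlock:
        mutex_unlock(&page_pools_lock);
        return err;
}
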
84 struct page_pool *pool; in netdev_nl_page_pool_get_dump() local
90 hlist_for_each_entry(pool, &netdev->page_pools, user.list) { in netdev_nl_page_pool_get_dump()
91 if (state->pp_id && state->pp_id < pool->user.id) in netdev_nl_page_pool_get_dump()
94 state->pp_id = pool->user.id; in netdev_nl_page_pool_get_dump()
95 err = fill(skb, pool, info); in netdev_nl_page_pool_get_dump()
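
Lines 84-95 are from the corresponding dump handler. It walks the per-netdev page_pools hlist and uses state->pp_id as a resume cursor: page_pool_list() (line 316 below) adds new pools at the head, so IDs decrease along the list and entries with an ID above the cursor have either been dumped already or were created after the dump started. An annotated restatement of that loop follows; "state" is assumed to be the per-dump netlink context that survives between dump passes.

/* Annotated restatement of the resume loop in
 * netdev_nl_page_pool_get_dump(). "state" is assumed to be the dump
 * context carried in the netlink callback between passes.
 */
hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
        if (state->pp_id && state->pp_id < pool->user.id)
                continue;               /* newer than the cursor: skip */

        state->pp_id = pool->user.id;   /* remember the resume point */
        err = fill(skb, pool, info);
        if (err)
                break;                  /* skb full; this pool is retried
                                         * on the next dump pass
                                         */
}
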
112 page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool, in page_pool_nl_stats_fill() argument
120 if (!page_pool_get_stats(pool, &stats)) in page_pool_nl_stats_fill()
129 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) || in page_pool_nl_stats_fill()
130 (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && in page_pool_nl_stats_fill()
132 pool->slow.netdev->ifindex))) in page_pool_nl_stats_fill()
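
page_pool_nl_stats_fill() (lines 112-132) is one of the two pp_nl_fill_cb implementations. It returns early when page_pool_get_stats() fails, then emits the pool ID and, unless the pool has been parked on the loopback device, the ifindex, before the actual counters. A small sketch of the early-out; treating a failed stats collection as "skip this pool" (return 0) and the CONFIG_PAGE_POOL_STATS dependency are assumptions about the elided lines.

/* Sketch of the stats early-out. page_pool_get_stats() adds the pool's
 * allocation counters and the per-cpu recycle counters into "stats";
 * on failure the pool is assumed to be skipped rather than failing the
 * whole dump.
 */
struct page_pool_stats stats = {};

if (!page_pool_get_stats(pool, &stats))
        return 0;
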
214 page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, in page_pool_nl_fill() argument
224 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id)) in page_pool_nl_fill()
227 if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && in page_pool_nl_fill()
229 pool->slow.netdev->ifindex)) in page_pool_nl_fill()
231 if (pool->user.napi_id && in page_pool_nl_fill()
232 nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id)) in page_pool_nl_fill()
235 inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
236 refsz = PAGE_SIZE << pool->p.order; in page_pool_nl_fill()
241 if (pool->user.detach_time && in page_pool_nl_fill()
243 pool->user.detach_time)) in page_pool_nl_fill()
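
page_pool_nl_fill() (lines 214-243) is the other fill callback and produces the non-stats attributes: the pool ID, the ifindex (suppressed for pools parked on loopback), the optional NAPI ID and detach time (only while non-zero), plus the in-flight page count and its memory footprint, with the per-page size taken from PAGE_SIZE << pool->p.order at line 236. A sketch of the put-or-cancel pattern follows; the nla_put_u32() on elided line 228, the NETDEV_A_PAGE_POOL_IFINDEX and NETDEV_A_PAGE_POOL_DETACH_TIME attribute names, and the err_cancel label are assumptions.

/* Sketch of the attribute-fill pattern. Assumed (not in the matched
 * lines): the ifindex is written with nla_put_u32(), the attribute
 * names for ifindex/detach time, and an err_cancel label that cancels
 * the half-built message with genlmsg_cancel().
 */
if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
        goto err_cancel;

/* Pools orphaned by netdev unregistration are re-homed to loopback;
 * don't report that as a real device binding.
 */
if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
                pool->slow.netdev->ifindex))
        goto err_cancel;

/* Optional attributes are only emitted while they carry a value. */
if (pool->user.napi_id &&
    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
        goto err_cancel;
if (pool->user.detach_time &&
    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
                 pool->user.detach_time))
        goto err_cancel;
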
254 static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd) in netdev_nl_page_pool_event() argument
263 if (hlist_unhashed(&pool->user.list)) in netdev_nl_page_pool_event()
265 net = dev_net(pool->slow.netdev); in netdev_nl_page_pool_event()
276 if (page_pool_nl_fill(ntf, pool, &info)) { in netdev_nl_page_pool_event()
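
netdev_nl_page_pool_event() (lines 254-276) reuses page_pool_nl_fill() to build the ADD/CHANGE/DEL notifications sent by the list/detach/unlist helpers below. Pools whose user.list is unhashed were never announced, so no event is generated for them; otherwise the message is multicast to the owning netns. A sketch of that path; the family and group names (netdev_nl_family, NETDEV_NLGRP_PAGE_POOL) and the genl_info_init_ntf() setup are assumptions, since those lines are elided.

/* Sketch of the notification path. Assumed names: netdev_nl_family,
 * NETDEV_NLGRP_PAGE_POOL, genl_info_init_ntf(); "cmd" is the function's
 * notification command argument.
 */
struct genl_info info;
struct sk_buff *ntf;
struct net *net;

if (hlist_unhashed(&pool->user.list))
        return;                         /* never visible: nothing to notify */
net = dev_net(pool->slow.netdev);

genl_info_init_ntf(&info, &netdev_nl_family, cmd);      /* assumed helper */

ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!ntf)
        return;

if (page_pool_nl_fill(ntf, pool, &info)) {
        nlmsg_free(ntf);                /* never send a half-built message */
        return;
}

genlmsg_multicast_netns(&netdev_nl_family, net, ntf, 0,
                        NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
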
303 int page_pool_list(struct page_pool *pool) in page_pool_list() argument
309 err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, in page_pool_list()
314 INIT_HLIST_NODE(&pool->user.list); in page_pool_list()
315 if (pool->slow.netdev) { in page_pool_list()
316 hlist_add_head(&pool->user.list, in page_pool_list()
317 &pool->slow.netdev->page_pools); in page_pool_list()
318 pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0; in page_pool_list()
320 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF); in page_pool_list()
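
page_pool_list() (lines 303-320) is where a pool becomes visible: it gets a user-facing ID from the page_pools xarray and, if it was created with a netdev, it is linked at the head of netdev->page_pools, inherits the NAPI ID, and an ADD notification is sent. The tail of the xa_alloc_cyclic() call at line 309 is cut off by the match; the sketch below shows what such an allocation looks like, where the id_alloc_next cursor, GFP_KERNEL, and the XA_FLAGS_ALLOC1 xarray flag are assumptions.

/* Sketch of the cyclic ID allocation. XA_FLAGS_ALLOC1 keeps ID 0
 * reserved, xa_limit_32b bounds IDs to 32 bits, and the cyclic cursor
 * avoids immediately recycling a freed ID, so user space can tell a
 * recreated pool apart from the one it replaced. The flag, the cursor
 * name, and the GFP flag are assumptions about the elided code.
 */
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
static u32 id_alloc_next;

static int pp_assign_id_sketch(struct page_pool *pool)
{
        int err;

        err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool,
                              xa_limit_32b, &id_alloc_next, GFP_KERNEL);
        /* xa_alloc_cyclic() returns 1 (not an error) when the cursor wraps */
        return err < 0 ? err : 0;
}
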
331 void page_pool_detached(struct page_pool *pool) in page_pool_detached() argument
334 pool->user.detach_time = ktime_get_boottime_seconds(); in page_pool_detached()
335 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); in page_pool_detached()
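
page_pool_detached() (lines 331-335) marks the moment the driver released a pool that still has pages in flight: the boottime timestamp it records is the same user.detach_time that page_pool_nl_fill() above only reports while non-zero, so user space can tell live pools from lingering ones. An annotated restatement; the lock around the two lines is assumed.

mutex_lock(&page_pools_lock);                   /* assumed, not matched */
/* Boottime clock keeps counting across suspend, unlike CLOCK_MONOTONIC. */
pool->user.detach_time = ktime_get_boottime_seconds();
netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
mutex_unlock(&page_pools_lock);
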
339 void page_pool_unlist(struct page_pool *pool) in page_pool_unlist() argument
342 netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF); in page_pool_unlist()
343 xa_erase(&page_pools, pool->user.id); in page_pool_unlist()
344 if (!hlist_unhashed(&pool->user.list)) in page_pool_unlist()
345 hlist_del(&pool->user.list); in page_pool_unlist()
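
page_pool_unlist() (lines 339-345) runs when the pool is finally being destroyed, and the ordering matters: the DEL notification goes out while the pool is still hashed, then the ID is released for reuse, and the hlist entry is only removed if one of the unregister paths below has not already unhashed it. An annotated restatement, with the lock assumed.

mutex_lock(&page_pools_lock);                   /* assumed, not matched */
netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
xa_erase(&page_pools, pool->user.id);           /* the ID may now be reused */
if (!hlist_unhashed(&pool->user.list))
        hlist_del(&pool->user.list);            /* skip if a wipe already unhashed it */
mutex_unlock(&page_pools_lock);
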
351 struct page_pool *pool; in page_pool_unreg_netdev_wipe() local
355 hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) { in page_pool_unreg_netdev_wipe()
356 hlist_del_init(&pool->user.list); in page_pool_unreg_netdev_wipe()
357 pool->slow.netdev = NET_PTR_POISON; in page_pool_unreg_netdev_wipe()
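
page_pool_unreg_netdev_wipe() (lines 351-357) covers the case where the pools go away together with their netdev: the _safe iterator permits unlinking while walking, hlist_del_init() leaves each node in exactly the state hlist_unhashed() tests for in page_pool_unlist() above, and the netdev pointer is poisoned so a stale dereference faults loudly. An annotated restatement, with the lock and the iterator variable assumed from the elided lines.

struct hlist_node *n;                           /* declaration assumed */
struct page_pool *pool;

mutex_lock(&page_pools_lock);                   /* assumed, not matched */
hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
        hlist_del_init(&pool->user.list);       /* hlist_unhashed() is now true */
        pool->slow.netdev = NET_PTR_POISON;     /* catch use-after-unregister */
}
mutex_unlock(&page_pools_lock);
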
364 struct page_pool *pool, *last; in page_pool_unreg_netdev() local
371 hlist_for_each_entry(pool, &netdev->page_pools, user.list) { in page_pool_unreg_netdev()
372 pool->slow.netdev = lo; in page_pool_unreg_netdev()
373 netdev_nl_page_pool_event(pool, in page_pool_unreg_netdev()
375 last = pool; in page_pool_unreg_netdev()
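
The final group (lines 364-375) is page_pool_unreg_netdev(): pools with pages still in flight cannot be freed with their netdev, so each one is re-parented to the netns loopback device and a CHANGE notification goes out, which is why the fill callbacks earlier suppress the ifindex attribute when it equals LOOPBACK_IFINDEX. The matches stop inside the loop; the sketch below spells out the assumed setup of "lo" and notes the elided follow-up that moves the entries onto the loopback device's list.

/* Sketch of the re-homing loop. Assumed (elided in the listing): "lo"
 * is the loopback device of the netdev's netns, the loop runs under the
 * protecting lock, and after the loop the surviving entries are moved
 * onto lo->page_pools, which is what "last" is being tracked for.
 */
struct net_device *lo = dev_net(netdev)->loopback_dev;
struct page_pool *pool, *last = NULL;

mutex_lock(&page_pools_lock);
hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
        pool->slow.netdev = lo;         /* the pool outlives its netdev */
        netdev_nl_page_pool_event(pool,
                                  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
        last = pool;                    /* tail entry, for the elided list move */
}
/* ... elided: if (last), splice the entries onto lo->page_pools ... */
mutex_unlock(&page_pools_lock);
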