Lines matching "pool" in net/xdp/xsk_buff_pool.c (Linux kernel AF_XDP buffer pool implementation)

11 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_add_xsk() argument
18 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); in xp_add_xsk()
20 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_add_xsk()
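The writer side above pairs list_add_rcu() with the IRQ-safe xsk_tx_list_lock; readers of xsk_tx_list (not part of this match) walk the list locklessly under RCU. A minimal reader-side sketch, assuming a hypothetical iteration context:

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		/* inspect or wake each socket registered for TX on this pool */
	}
	rcu_read_unlock();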
23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) in xp_del_xsk() argument
30 spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
32 spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); in xp_del_xsk()
35 void xp_destroy(struct xsk_buff_pool *pool) in xp_destroy() argument
37 if (!pool) in xp_destroy()
40 kvfree(pool->heads); in xp_destroy()
41 kvfree(pool); in xp_destroy()
47 struct xsk_buff_pool *pool; in xp_create_and_assign_umem() local
51 pool = kvzalloc(struct_size(pool, free_heads, umem->chunks), in xp_create_and_assign_umem()
53 if (!pool) in xp_create_and_assign_umem()
56 pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL); in xp_create_and_assign_umem()
57 if (!pool->heads) in xp_create_and_assign_umem()
60 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
61 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
62 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
63 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
64 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
65 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
66 pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
67 pool->frame_len = umem->chunk_size - umem->headroom - in xp_create_and_assign_umem()
69 pool->umem = umem; in xp_create_and_assign_umem()
70 pool->addrs = umem->addrs; in xp_create_and_assign_umem()
71 INIT_LIST_HEAD(&pool->free_list); in xp_create_and_assign_umem()
72 INIT_LIST_HEAD(&pool->xsk_tx_list); in xp_create_and_assign_umem()
73 spin_lock_init(&pool->xsk_tx_list_lock); in xp_create_and_assign_umem()
74 refcount_set(&pool->users, 1); in xp_create_and_assign_umem()
76 pool->fq = xs->fq_tmp; in xp_create_and_assign_umem()
77 pool->cq = xs->cq_tmp; in xp_create_and_assign_umem()
81 for (i = 0; i < pool->free_heads_cnt; i++) { in xp_create_and_assign_umem()
82 xskb = &pool->heads[i]; in xp_create_and_assign_umem()
83 xskb->pool = pool; in xp_create_and_assign_umem()
85 pool->free_heads[i] = xskb; in xp_create_and_assign_umem()
88 return pool; in xp_create_and_assign_umem()
91 xp_destroy(pool); in xp_create_and_assign_umem()
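The kvzalloc(struct_size(...)) call above sizes the pool header plus its free_heads[] flexible array in a single allocation. A minimal userspace sketch of the same sizing arithmetic, assuming a trimmed-down struct:

#include <stdlib.h>
#include <stddef.h>

struct pool {
	size_t free_heads_cnt;
	void *free_heads[];   /* flexible array member, as in struct xsk_buff_pool */
};

int main(void)
{
	size_t chunks = 1024;
	/* struct_size(pool, free_heads, chunks): header plus chunks array slots */
	struct pool *p = calloc(1, offsetof(struct pool, free_heads) +
				   chunks * sizeof(p->free_heads[0]));
	if (!p)
		return 1;
	p->free_heads_cnt = chunks;
	free(p);
	return 0;
}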
95 void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq) in xp_set_rxq_info() argument
99 for (i = 0; i < pool->heads_cnt; i++) in xp_set_rxq_info()
100 pool->heads[i].xdp.rxq = rxq; in xp_set_rxq_info()
104 static void xp_disable_drv_zc(struct xsk_buff_pool *pool) in xp_disable_drv_zc() argument
111 if (pool->umem->zc) { in xp_disable_drv_zc()
113 bpf.xsk.pool = NULL; in xp_disable_drv_zc()
114 bpf.xsk.queue_id = pool->queue_id; in xp_disable_drv_zc()
116 err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf); in xp_disable_drv_zc()
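xp_disable_drv_zc() asks the driver to drop its zero-copy state by issuing an ndo_bpf() request with a NULL pool. A hedged sketch of the driver side of that call (the example_* names are assumptions, not a real driver):

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_XSK_POOL:
		/* bpf->xsk.pool == NULL: tear down zero-copy on bpf->xsk.queue_id;
		 * non-NULL: enable zero-copy with that pool. */
		return example_setup_xsk_pool(dev, bpf->xsk.pool, bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}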
123 static int __xp_assign_dev(struct xsk_buff_pool *pool, in __xp_assign_dev() argument
141 pool->netdev = netdev; in __xp_assign_dev()
142 pool->queue_id = queue_id; in __xp_assign_dev()
143 err = xsk_reg_pool_at_qid(netdev, pool, queue_id); in __xp_assign_dev()
148 pool->uses_need_wakeup = true; in __xp_assign_dev()
153 pool->cached_need_wakeup = XDP_WAKEUP_TX; in __xp_assign_dev()
169 bpf.xsk.pool = pool; in __xp_assign_dev()
176 if (!pool->dma_pages) { in __xp_assign_dev()
181 pool->umem->zc = true; in __xp_assign_dev()
185 xp_disable_drv_zc(pool); in __xp_assign_dev()
196 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev, in xp_assign_dev() argument
199 return __xp_assign_dev(pool, dev, queue_id, flags); in xp_assign_dev()
202 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem, in xp_assign_dev_shared() argument
208 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
212 if (pool->uses_need_wakeup) in xp_assign_dev_shared()
215 return __xp_assign_dev(pool, dev, queue_id, flags); in xp_assign_dev_shared()
218 void xp_clear_dev(struct xsk_buff_pool *pool) in xp_clear_dev() argument
220 if (!pool->netdev) in xp_clear_dev()
223 xp_disable_drv_zc(pool); in xp_clear_dev()
224 xsk_clear_pool_at_qid(pool->netdev, pool->queue_id); in xp_clear_dev()
225 dev_put(pool->netdev); in xp_clear_dev()
226 pool->netdev = NULL; in xp_clear_dev()
231 struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool, in xp_release_deferred() local
235 xp_clear_dev(pool); in xp_release_deferred()
238 if (pool->fq) { in xp_release_deferred()
239 xskq_destroy(pool->fq); in xp_release_deferred()
240 pool->fq = NULL; in xp_release_deferred()
243 if (pool->cq) { in xp_release_deferred()
244 xskq_destroy(pool->cq); in xp_release_deferred()
245 pool->cq = NULL; in xp_release_deferred()
248 xdp_put_umem(pool->umem, false); in xp_release_deferred()
249 xp_destroy(pool); in xp_release_deferred()
252 void xp_get_pool(struct xsk_buff_pool *pool) in xp_get_pool() argument
254 refcount_inc(&pool->users); in xp_get_pool()
257 bool xp_put_pool(struct xsk_buff_pool *pool) in xp_put_pool() argument
259 if (!pool) in xp_put_pool()
262 if (refcount_dec_and_test(&pool->users)) { in xp_put_pool()
263 INIT_WORK(&pool->work, xp_release_deferred); in xp_put_pool()
264 schedule_work(&pool->work); in xp_put_pool()
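xp_get_pool()/xp_put_pool() give the pool a plain reference count, with the last put deferring teardown to a workqueue because the teardown has to unwind device state. A hedged caller-side sketch, assuming an xsk_bind()-style context for the shared-pool case:

/* A second socket starts sharing an already-bound pool. */
xp_get_pool(umem_xs->pool);
xs->pool = umem_xs->pool;

/* ... later, on socket release ... */
xp_put_pool(xs->pool);   /* last reference schedules xp_release_deferred() */
xs->pool = NULL;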
271 static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool) in xp_find_dma_map() argument
275 list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) { in xp_find_dma_map()
276 if (dma_map->netdev == pool->netdev) in xp_find_dma_map()
331 void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) in xp_dma_unmap() argument
335 if (pool->dma_pages_cnt == 0) in xp_dma_unmap()
338 dma_map = xp_find_dma_map(pool); in xp_dma_unmap()
348 kvfree(pool->dma_pages); in xp_dma_unmap()
349 pool->dma_pages_cnt = 0; in xp_dma_unmap()
350 pool->dev = NULL; in xp_dma_unmap()
366 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map) in xp_init_dma_info() argument
368 pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL); in xp_init_dma_info()
369 if (!pool->dma_pages) in xp_init_dma_info()
372 pool->dev = dma_map->dev; in xp_init_dma_info()
373 pool->dma_pages_cnt = dma_map->dma_pages_cnt; in xp_init_dma_info()
374 pool->dma_need_sync = dma_map->dma_need_sync; in xp_init_dma_info()
375 memcpy(pool->dma_pages, dma_map->dma_pages, in xp_init_dma_info()
376 pool->dma_pages_cnt * sizeof(*pool->dma_pages)); in xp_init_dma_info()
381 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, in xp_dma_map() argument
389 dma_map = xp_find_dma_map(pool); in xp_dma_map()
391 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
399 dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem); in xp_dma_map()
415 if (pool->unaligned) in xp_dma_map()
418 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
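xp_dma_map() first looks for an existing mapping for the same netdev (xp_find_dma_map()), so a umem shared across queues or sockets is only DMA-mapped once. Drivers normally reach this through the xsk_pool_dma_map()/xsk_pool_dma_unmap() wrappers; a hedged setup-path sketch (ring and attribute names are assumptions):

/* Zero-copy RX setup in a driver (context assumed). */
err = xsk_pool_dma_map(rx_ring->xsk_pool, rx_ring->dev, DMA_ATTR_SKIP_CPU_SYNC);
if (err)
	return err;

/* ... and the matching teardown ... */
xsk_pool_dma_unmap(rx_ring->xsk_pool, DMA_ATTR_SKIP_CPU_SYNC);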
428 static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool, in xp_addr_crosses_non_contig_pg() argument
431 return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size); in xp_addr_crosses_non_contig_pg()
434 static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr) in xp_check_unaligned() argument
437 if (*addr >= pool->addrs_cnt || in xp_check_unaligned()
438 *addr + pool->chunk_size > pool->addrs_cnt || in xp_check_unaligned()
439 xp_addr_crosses_non_contig_pg(pool, *addr)) in xp_check_unaligned()
444 static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr) in xp_check_aligned() argument
446 *addr = xp_aligned_extract_addr(pool, *addr); in xp_check_aligned()
447 return *addr < pool->addrs_cnt; in xp_check_aligned()
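For aligned mode, validating a fill-queue address is just masking it down to its chunk start (with the chunk_mask set up in xp_create_and_assign_umem()) and bounds-checking it against the umem size. A minimal userspace sketch of that arithmetic, with example values assumed:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 2048;                 /* power of two, as required */
	uint64_t chunk_mask = ~(chunk_size - 1);    /* mirrors pool->chunk_mask */
	uint64_t addrs_cnt  = 4 * 1024 * 1024;      /* umem size in bytes */
	uint64_t addr       = 0x12345;              /* address taken from the fill queue */

	addr &= chunk_mask;                         /* xp_aligned_extract_addr() */
	bool ok = addr < addrs_cnt;                 /* xp_check_aligned() bound check */
	printf("chunk start 0x%llx, %s\n",
	       (unsigned long long)addr, ok ? "valid" : "out of range");
	return 0;
}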
450 static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) in __xp_alloc() argument
456 if (pool->free_heads_cnt == 0) in __xp_alloc()
459 xskb = pool->free_heads[--pool->free_heads_cnt]; in __xp_alloc()
462 if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { in __xp_alloc()
463 pool->fq->queue_empty_descs++; in __xp_alloc()
468 ok = pool->unaligned ? xp_check_unaligned(pool, &addr) : in __xp_alloc()
469 xp_check_aligned(pool, &addr); in __xp_alloc()
471 pool->fq->invalid_descs++; in __xp_alloc()
472 xskq_cons_release(pool->fq); in __xp_alloc()
477 xskq_cons_release(pool->fq); in __xp_alloc()
480 xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; in __xp_alloc()
481 if (pool->dma_pages_cnt) { in __xp_alloc()
482 xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] & in __xp_alloc()
485 xskb->dma = xskb->frame_dma + pool->headroom + in __xp_alloc()
491 struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) in xp_alloc() argument
495 if (!pool->free_list_cnt) { in xp_alloc()
496 xskb = __xp_alloc(pool); in xp_alloc()
500 pool->free_list_cnt--; in xp_alloc()
501 xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, in xp_alloc()
509 if (pool->dma_need_sync) { in xp_alloc()
510 dma_sync_single_range_for_device(pool->dev, xskb->dma, 0, in xp_alloc()
511 pool->frame_len, in xp_alloc()
518 bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count) in xp_can_alloc() argument
520 if (pool->free_list_cnt >= count) in xp_can_alloc()
522 return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt); in xp_can_alloc()
528 xskb->pool->free_list_cnt++; in xp_free()
529 list_add(&xskb->free_list_node, &xskb->pool->free_list); in xp_free()
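xp_alloc() prefers recycling buffers from free_list and only then consumes fill-queue addresses via __xp_alloc(); xp_free() returns a buffer to the free list. Drivers use this pair through the xsk_buff_alloc()/xsk_buff_free() wrappers; a hedged RX-refill sketch (ring names and error handling are assumptions):

/* Refill one RX descriptor from the pool (driver context assumed). */
struct xdp_buff *xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp)
	return false;                        /* fill queue empty; retry later */

dma_addr_t dma = xsk_buff_xdp_get_dma(xdp);
/* ... write dma into the hardware RX descriptor ... */

/* On a teardown or error path the buffer goes straight back to the free list. */
xsk_buff_free(xdp);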
533 void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr) in xp_raw_get_data() argument
535 addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr; in xp_raw_get_data()
536 return pool->addrs + addr; in xp_raw_get_data()
540 dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr) in xp_raw_get_dma() argument
542 addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr; in xp_raw_get_dma()
543 return (pool->dma_pages[addr >> PAGE_SHIFT] & in xp_raw_get_dma()
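xp_raw_get_dma() translates a umem offset to a bus address with a page-table-style lookup: the dma_pages[] entry for the page supplies the base (flag bits in the entry are masked off in the kernel code) and the low bits of the address supply the in-page offset. A minimal userspace sketch with assumed values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
	/* One bus address per 4 KiB umem page (example values). */
	uint64_t dma_pages[] = { 0xffff1000, 0xffff2000, 0xffff3000 };
	uint64_t addr = 0x1abc;   /* offset into the umem */

	uint64_t dma = dma_pages[addr >> PAGE_SHIFT] + (addr & (PAGE_SIZE - 1));
	printf("dma = 0x%llx\n", (unsigned long long)dma);
	return 0;
}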
551 dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0, in xp_dma_sync_for_cpu_slow()
552 xskb->pool->frame_len, DMA_BIDIRECTIONAL); in xp_dma_sync_for_cpu_slow()
556 void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma, in xp_dma_sync_for_device_slow() argument
559 dma_sync_single_range_for_device(pool->dev, dma, 0, in xp_dma_sync_for_device_slow()
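These _slow variants are only reached when dma_need_sync is set; otherwise the inline fast-path wrappers return immediately. A hedged driver-side sketch of the CPU-direction sync before handing a received frame to the XDP program (field and ring names are assumptions):

/* After the NIC has written len bytes into the buffer (context assumed). */
xdp->data_end = xdp->data + len;                     /* record received length */
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);   /* sync before CPU reads */
/* ... run the XDP program on xdp ... */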