Lines Matching full:area (io_uring/zcrx.c)

37 struct io_zcrx_area *area, int nr_mapped) in __io_zcrx_unmap_area() argument
42 struct net_iov *niov = &area->nia.niovs[i]; in __io_zcrx_unmap_area()
52 static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) in io_zcrx_unmap_area() argument
56 if (area->is_mapped) in io_zcrx_unmap_area()
57 __io_zcrx_unmap_area(ifq, area, area->nia.num_niovs); in io_zcrx_unmap_area()
58 area->is_mapped = false; in io_zcrx_unmap_area()
61 static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area) in io_zcrx_map_area() argument
66 if (area->is_mapped) in io_zcrx_map_area()
69 for (i = 0; i < area->nia.num_niovs; i++) { in io_zcrx_map_area()
70 struct net_iov *niov = &area->nia.niovs[i]; in io_zcrx_map_area()
73 dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE, in io_zcrx_map_area()
84 if (i != area->nia.num_niovs) { in io_zcrx_map_area()
85 __io_zcrx_unmap_area(ifq, area, i); in io_zcrx_map_area()
89 area->is_mapped = true; in io_zcrx_map_area()
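
Note on the mapping path (lines 61-89 above): io_zcrx_map_area() walks every niov in the area, DMA-maps the backing page, and on the first failure unwinds only the mappings already made via __io_zcrx_unmap_area(ifq, area, i); the full-teardown caller io_zcrx_unmap_area() (lines 52-58) passes num_niovs and is guarded by area->is_mapped. A minimal kernel-style sketch of that control flow, not the actual function: the DMA direction and attrs arguments are assumed (the matched line is truncated), and the helper that stores the dma address into the niov is elided since it is not among the matches.

static int map_area_sketch(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
{
	int i;

	if (area->is_mapped)
		return 0;				/* nothing to do, already mapped */

	for (i = 0; i < area->nia.num_niovs; i++) {
		dma_addr_t dma;

		/* direction/attrs assumed; the matched line ends at PAGE_SIZE */
		dma = dma_map_page_attrs(ifq->dev, area->pages[i], 0, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
		if (dma_mapping_error(ifq->dev, dma))
			break;				/* stop at the first failure */
		/* ... the dma address is recorded in &area->nia.niovs[i] ... */
	}

	if (i != area->nia.num_niovs) {
		__io_zcrx_unmap_area(ifq, area, i);	/* undo only the first i mappings */
		return -ENOMEM;
	}
	area->is_mapped = true;
	return 0;
}

The map/unmap pair is driven by the page-pool lifecycle visible further down: io_pp_zc_init() (line 645) maps the area when the pool is installed, io_pp_uninstall() (lines 684-685) unmaps it.
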
130 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_get_user_counter() local
132 return &area->user_refs[net_iov_idx(niov)]; in io_get_user_counter()
152 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_zcrx_iov_page() local
154 return area->pages[net_iov_idx(niov)]; in io_zcrx_iov_page()
188 static void io_zcrx_free_area(struct io_zcrx_area *area) in io_zcrx_free_area() argument
190 io_zcrx_unmap_area(area->ifq, area); in io_zcrx_free_area()
192 kvfree(area->freelist); in io_zcrx_free_area()
193 kvfree(area->nia.niovs); in io_zcrx_free_area()
194 kvfree(area->user_refs); in io_zcrx_free_area()
195 if (area->pages) { in io_zcrx_free_area()
196 unpin_user_pages(area->pages, area->nr_folios); in io_zcrx_free_area()
197 kvfree(area->pages); in io_zcrx_free_area()
199 kfree(area); in io_zcrx_free_area()
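
Note on teardown (lines 188-199 above), re-assembled here only to annotate the ordering: DMA mappings are torn down before the user pages are unpinned, so the device never keeps a mapping to a page that has already been released, and kvfree()/kfree() come last.

static void free_area_sketch(struct io_zcrx_area *area)
{
	io_zcrx_unmap_area(area->ifq, area);	/* undo dma_map_page_attrs() first */

	kvfree(area->freelist);
	kvfree(area->nia.niovs);
	kvfree(area->user_refs);
	if (area->pages) {			/* may be NULL if pinning failed */
		unpin_user_pages(area->pages, area->nr_folios);
		kvfree(area->pages);
	}
	kfree(area);
}
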
206 struct io_zcrx_area *area; in io_zcrx_create_area() local
224 area = kzalloc(sizeof(*area), GFP_KERNEL); in io_zcrx_create_area()
225 if (!area) in io_zcrx_create_area()
228 area->pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len, in io_zcrx_create_area()
230 if (IS_ERR(area->pages)) { in io_zcrx_create_area()
231 ret = PTR_ERR(area->pages); in io_zcrx_create_area()
232 area->pages = NULL; in io_zcrx_create_area()
235 area->nr_folios = nr_iovs = nr_pages; in io_zcrx_create_area()
236 area->nia.num_niovs = nr_iovs; in io_zcrx_create_area()
238 area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]), in io_zcrx_create_area()
240 if (!area->nia.niovs) in io_zcrx_create_area()
243 area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]), in io_zcrx_create_area()
245 if (!area->freelist) in io_zcrx_create_area()
249 area->freelist[i] = i; in io_zcrx_create_area()
251 area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]), in io_zcrx_create_area()
253 if (!area->user_refs) in io_zcrx_create_area()
257 struct net_iov *niov = &area->nia.niovs[i]; in io_zcrx_create_area()
259 niov->owner = &area->nia; in io_zcrx_create_area()
260 area->freelist[i] = i; in io_zcrx_create_area()
261 atomic_set(&area->user_refs[i], 0); in io_zcrx_create_area()
264 area->free_count = nr_iovs; in io_zcrx_create_area()
265 area->ifq = ifq; in io_zcrx_create_area()
266 /* we're only supporting one area per ifq for now */ in io_zcrx_create_area()
267 area->area_id = 0; in io_zcrx_create_area()
268 area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT; in io_zcrx_create_area()
269 spin_lock_init(&area->freelist_lock); in io_zcrx_create_area()
270 *res = area; in io_zcrx_create_area()
273 if (area) in io_zcrx_create_area()
274 io_zcrx_free_area(area); in io_zcrx_create_area()
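
Note on area creation (lines 206-274 above): the area struct is kzalloc'ed, the user buffer is pinned page by page with io_pin_pages(), three parallel per-niov arrays (nia.niovs, freelist, user_refs) are allocated, every index is seeded as free, each niov's owner points back at area->nia, and area id 0 is published to userspace as rq_area_token. A condensed sketch under those assumptions; the region size/alignment checks and the exact GFP flags are not in the matches and are simplified here:

static int create_area_sketch(struct io_zcrx_ifq *ifq, struct io_zcrx_area **res,
			      struct io_uring_zcrx_area_reg *area_reg)
{
	struct io_zcrx_area *area;
	unsigned int nr_iovs, i;
	int nr_pages, ret = -ENOMEM;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	/* pin the whole user region; one niov per page */
	area->pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
				   &nr_pages);
	if (IS_ERR(area->pages)) {
		ret = PTR_ERR(area->pages);
		area->pages = NULL;
		goto err;
	}
	area->nr_folios = nr_iovs = nr_pages;
	area->nia.num_niovs = nr_iovs;

	/* three parallel arrays, all indexed by the niov index */
	area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]), GFP_KERNEL);
	area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]), GFP_KERNEL);
	area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]), GFP_KERNEL);
	if (!area->nia.niovs || !area->freelist || !area->user_refs)
		goto err;

	for (i = 0; i < nr_iovs; i++) {
		area->nia.niovs[i].owner = &area->nia;	/* niov -> area back-pointer */
		area->freelist[i] = i;			/* every niov starts out free */
		atomic_set(&area->user_refs[i], 0);
	}

	area->free_count = nr_iovs;
	area->ifq = ifq;
	area->area_id = 0;	/* only one area per ifq for now */
	area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
	spin_lock_init(&area->freelist_lock);
	*res = area;
	return 0;
err:
	io_zcrx_free_area(area);	/* kvfree(NULL)/pages guard make partial init safe */
	return ret;
}
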
334 if (ifq->area) in io_zcrx_ifq_free()
335 io_zcrx_free_area(ifq->area); in io_zcrx_ifq_free()
348 struct io_uring_zcrx_area_reg area; in io_register_zcrx_ifq() local
383 if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area))) in io_register_zcrx_ifq()
394 ret = io_zcrx_create_area(ifq, &ifq->area, &area); in io_register_zcrx_ifq()
425 copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) { in io_register_zcrx_ifq()
449 static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area) in __io_zcrx_get_free_niov() argument
453 lockdep_assert_held(&area->freelist_lock); in __io_zcrx_get_free_niov()
455 niov_idx = area->freelist[--area->free_count]; in __io_zcrx_get_free_niov()
456 return &area->nia.niovs[niov_idx]; in __io_zcrx_get_free_niov()
461 struct io_zcrx_area *area = io_zcrx_iov_to_area(niov); in io_zcrx_return_niov_freelist() local
463 spin_lock_bh(&area->freelist_lock); in io_zcrx_return_niov_freelist()
464 area->freelist[area->free_count++] = net_iov_idx(niov); in io_zcrx_return_niov_freelist()
465 spin_unlock_bh(&area->freelist_lock); in io_zcrx_return_niov_freelist()
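
Note on the freelist (lines 449-465 above): area->freelist is a stack of niov indices with free_count as its depth, protected by freelist_lock; __io_zcrx_get_free_niov() pops with the lock already held (hence the lockdep assert), io_zcrx_return_niov_freelist() pushes under spin_lock_bh(). The same pop also backs the copy-path fallback allocator at lines 723-730. Sketch of the pair, restated from the matched lines:

static struct net_iov *get_free_niov_sketch(struct io_zcrx_area *area)
{
	unsigned int niov_idx;

	lockdep_assert_held(&area->freelist_lock);	/* caller takes the lock */
	niov_idx = area->freelist[--area->free_count];	/* pop */
	return &area->nia.niovs[niov_idx];
}

static void return_niov_sketch(struct net_iov *niov)
{
	struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);

	/* _bh locking, matching the listed lines; page-pool callbacks typically run in softirq */
	spin_lock_bh(&area->freelist_lock);
	area->freelist[area->free_count++] = net_iov_idx(niov);	/* push */
	spin_unlock_bh(&area->freelist_lock);
}
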
482 struct io_zcrx_area *area = ifq->area; in io_zcrx_scrub() local
485 if (!area) in io_zcrx_scrub()
489 for (i = 0; i < area->nia.num_niovs; i++) { in io_zcrx_scrub()
490 struct net_iov *niov = &area->nia.niovs[i]; in io_zcrx_scrub()
545 struct io_zcrx_area *area; in io_zcrx_ring_refill() local
554 area = ifq->area; in io_zcrx_ring_refill()
556 if (unlikely(niov_idx >= area->nia.num_niovs)) in io_zcrx_ring_refill()
558 niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs); in io_zcrx_ring_refill()
560 niov = &area->nia.niovs[niov_idx]; in io_zcrx_ring_refill()
583 struct io_zcrx_area *area = ifq->area; in io_zcrx_refill_slow() local
585 spin_lock_bh(&area->freelist_lock); in io_zcrx_refill_slow()
586 while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) { in io_zcrx_refill_slow()
587 struct net_iov *niov = __io_zcrx_get_free_niov(area); in io_zcrx_refill_slow()
594 spin_unlock_bh(&area->freelist_lock); in io_zcrx_refill_slow()
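
Note on refill (lines 545-594 above): the fast path, io_zcrx_ring_refill(), consumes niov indices that userspace wrote into the refill ring, so each index is bounds-checked and then clamped with array_index_nospec() before it is used to index area->nia.niovs; the slow path, io_zcrx_refill_slow(), instead drains the kernel-side freelist into the page pool's alloc cache until it holds PP_ALLOC_CACHE_REFILL entries. A sketch of just the index validation, the part worth calling out because the value comes from userspace (the real code skips the bad entry rather than returning NULL):

static struct net_iov *lookup_niov_sketch(struct io_zcrx_ifq *ifq,
					  unsigned int niov_idx)
{
	struct io_zcrx_area *area = ifq->area;

	if (unlikely(niov_idx >= area->nia.num_niovs))
		return NULL;		/* bogus refill-ring entry */
	/* clamp under speculation so the index cannot feed a Spectre-v1 gadget */
	niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
	return &area->nia.niovs[niov_idx];
}
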
645 ret = io_zcrx_map_area(ifq, ifq->area); in io_pp_zc_init()
656 struct io_zcrx_area *area = ifq->area; in io_pp_zc_destroy() local
658 if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs)) in io_pp_zc_destroy()
684 if (ifq->area) in io_pp_uninstall()
685 io_zcrx_unmap_area(ifq, ifq->area); in io_pp_uninstall()
704 struct io_zcrx_area *area; in io_zcrx_queue_cqe() local
715 area = io_zcrx_iov_to_area(niov); in io_zcrx_queue_cqe()
718 rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT); in io_zcrx_queue_cqe()
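
Note on the completion offset (lines 268 and 718 above): rq_area_token is the area id shifted by IORING_ZCRX_AREA_SHIFT, and each zero-copy completion's off field is the byte offset into the area plus that same shifted id, so userspace splits the field with the shift. A hypothetical, self-contained userspace helper showing the split; the shift value is assumed to match IORING_ZCRX_AREA_SHIFT from the uapi header, and returning the buffer through the refill ring is not shown:

#include <stdint.h>
#include <stdio.h>

#define ZCRX_AREA_SHIFT 48	/* assumed value of IORING_ZCRX_AREA_SHIFT */

static void split_zcrx_off(uint64_t off, uint32_t *area_id, uint64_t *offset)
{
	*area_id = (uint32_t)(off >> ZCRX_AREA_SHIFT);
	*offset  = off & (((uint64_t)1 << ZCRX_AREA_SHIFT) - 1);
}

int main(void)
{
	uint32_t id;
	uint64_t data_off;

	/* area 0 (the only area per ifq for now), data at byte offset 0x1000 */
	split_zcrx_off(((uint64_t)0 << ZCRX_AREA_SHIFT) | 0x1000, &id, &data_off);
	printf("area %u, offset 0x%llx\n", id, (unsigned long long)data_off);
	return 0;
}
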
723 static struct net_iov *io_zcrx_alloc_fallback(struct io_zcrx_area *area) in io_zcrx_alloc_fallback() argument
727 spin_lock_bh(&area->freelist_lock); in io_zcrx_alloc_fallback()
728 if (area->free_count) in io_zcrx_alloc_fallback()
729 niov = __io_zcrx_get_free_niov(area); in io_zcrx_alloc_fallback()
730 spin_unlock_bh(&area->freelist_lock); in io_zcrx_alloc_fallback()
741 struct io_zcrx_area *area = ifq->area; in io_zcrx_copy_chunk() local
752 niov = io_zcrx_alloc_fallback(area); in io_zcrx_copy_chunk()