Lines matching full:bl (identifier search for bl, a struct io_buffer_list *, over io_uring/kbuf.c; each match shows its file line number and enclosing function)

44 struct io_buffer_list *bl, in __io_buffer_get_list() argument
47 if (bl && bgid < BGID_ARRAY) in __io_buffer_get_list()
48 return &bl[bgid]; in __io_buffer_get_list()
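The two matches above capture the whole lookup policy: group IDs below BGID_ARRAY index straight into a preallocated array, everything else goes through the xarray. A minimal sketch of the function these fragments come from; the xa_load() fallback is an assumption inferred from the io_bl_xa usage elsewhere in this listing:

static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
						   struct io_buffer_list *bl,
						   unsigned int bgid)
{
	/* fast path: low group IDs live in the preallocated array */
	if (bl && bgid < BGID_ARRAY)
		return &bl[bgid];

	/* slow path: dynamically added groups live in the xarray (assumed) */
	return xa_load(&ctx->io_bl_xa, bgid);
}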
62 struct io_buffer_list *bl, unsigned int bgid) in io_buffer_add_list() argument
69 bl->bgid = bgid; in io_buffer_add_list()
70 smp_store_release(&bl->is_ready, 1); in io_buffer_add_list()
75 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
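Lines 69-75 show how a list is published: store the bgid, then flip is_ready with smp_store_release(), which pairs with the smp_load_acquire(&bl->is_ready) in io_pbuf_get_address() (line 792 below). A sketch of the plausible shape; the early return for array-resident groups is an assumption from the line-number gap:

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Publish the group ID, then mark the list ready. The release
	 * pairs with the acquire in the lockless mmap-side lookup, so
	 * readers never observe a half-initialized list.
	 */
	bl->bgid = bgid;
	smp_store_release(&bl->is_ready, 1);

	/* array-resident groups need no xarray entry (assumed) */
	if (bgid < BGID_ARRAY)
		return 0;

	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}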
81 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
96 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
97 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
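The legacy recycle path is just these two steps: resolve the buffer's group by its stored bgid, then push the buffer back on the front of that group's list so it is handed out again first. In context (surrounding locking elided):

	/* return a consumed legacy buffer to its group */
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);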
139 struct io_buffer_list *bl) in io_provided_buffer_select() argument
141 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
144 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
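Classic provided-buffer selection pops the first entry off the group's list. A hedged sketch of the rest of the function; the io_buffer field names (addr, len, bid) and the REQ_F_BUFFER_SELECTED flag are assumptions from the io_uring provided-buffer ABI, not visible in the matches:

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		/* clamp the request to the size of the chosen buffer */
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}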
157 struct io_buffer_list *bl, in io_ring_buffer_select() argument
160 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
162 __u16 head = bl->head; in io_ring_buffer_select()
167 head &= bl->mask; in io_ring_buffer_select()
169 if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) { in io_ring_buffer_select()
174 buf = page_address(bl->buf_pages[index]); in io_ring_buffer_select()
180 req->buf_list = bl; in io_ring_buffer_select()
195 bl->head++; in io_ring_buffer_select()
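The ring-mapped path masks the consumer head and locates the io_uring_buf entry. Lines 169-174 show the interesting wrinkle: kernel-allocated (is_mmap) rings are virtually contiguous, while pinned user rings may span pages, so entries past the first page are reached through page_address() on the pinned page array. A condensed sketch; the acquire-read of the userspace-written tail is an assumption, and the sketch commits the head unconditionally, whereas the fragments at lines 180 and 195 suggest the kernel can defer that bump by stashing bl on the request for retry-capable paths:

static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct io_uring_buf *buf;
	__u16 head = bl->head;

	/* tail is advanced by userspace; ring is empty if it equals head */
	if (unlikely(smp_load_acquire(&br->tail) == head))
		return NULL;

	head &= bl->mask;
	if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
		/* contiguous memory: index the ring directly */
		buf = &br->bufs[head];
	} else {
		/* pinned user pages: locate the page, then the slot in it */
		int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
		int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;

		buf = page_address(bl->buf_pages[index]);
		buf += off;
	}
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	bl->head++;		/* commit: consume this ring entry */
	return u64_to_user_ptr(buf->addr);
}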
204 struct io_buffer_list *bl; in io_buffer_select() local
209 bl = io_buffer_get_list(ctx, req->buf_index); in io_buffer_select()
210 if (likely(bl)) { in io_buffer_select()
211 if (bl->is_mapped) in io_buffer_select()
212 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
214 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
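Selection dispatches on is_mapped: ring-backed groups take the ring path, everything else takes the legacy list path. A sketch of the dispatcher; the submit-lock helpers around the lookup are an assumption about the surrounding locking, not shown in the matches:

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	void __user *ret = NULL;
	struct io_buffer_list *bl;

	io_ring_submit_lock(ctx, issue_flags);	/* assumed locking helper */

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_mapped)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}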
222 struct io_buffer_list *bl; in io_init_bl_list() local
225 bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL); in io_init_bl_list()
226 if (!bl) in io_init_bl_list()
230 INIT_LIST_HEAD(&bl[i].buf_list); in io_init_bl_list()
231 bl[i].bgid = i; in io_init_bl_list()
234 smp_store_release(&ctx->io_bl, bl); in io_init_bl_list()
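The fixed array is allocated once, each slot gets an empty list and its own index as bgid, and only then is the array published with smp_store_release(), so the lockless reader at line 783 observes fully initialized slots. A plausible full shape:

static int io_init_bl_list(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	int i;

	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;

	for (i = 0; i < BGID_ARRAY; i++) {
		INIT_LIST_HEAD(&bl[i].buf_list);
		bl[i].bgid = i;
	}

	/* publish only after every slot is initialized */
	smp_store_release(&ctx->io_bl, bl);
	return 0;
}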
241 static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_kbuf_mark_free() argument
246 if (bl->buf_ring == ibf->mem) { in io_kbuf_mark_free()
257 struct io_buffer_list *bl, unsigned nbufs) in __io_remove_buffers() argument
265 if (bl->is_mapped) { in __io_remove_buffers()
266 i = bl->buf_ring->tail - bl->head; in __io_remove_buffers()
267 if (bl->is_mmap) { in __io_remove_buffers()
272 io_kbuf_mark_free(ctx, bl); in __io_remove_buffers()
273 bl->buf_ring = NULL; in __io_remove_buffers()
274 bl->is_mmap = 0; in __io_remove_buffers()
275 } else if (bl->buf_nr_pages) { in __io_remove_buffers()
278 for (j = 0; j < bl->buf_nr_pages; j++) in __io_remove_buffers()
279 unpin_user_page(bl->buf_pages[j]); in __io_remove_buffers()
280 kvfree(bl->buf_pages); in __io_remove_buffers()
281 bl->buf_pages = NULL; in __io_remove_buffers()
282 bl->buf_nr_pages = 0; in __io_remove_buffers()
285 INIT_LIST_HEAD(&bl->buf_list); in __io_remove_buffers()
286 bl->is_mapped = 0; in __io_remove_buffers()
293 while (!list_empty(&bl->buf_list)) { in __io_remove_buffers()
296 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in __io_remove_buffers()
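Teardown has three cases visible in lines 265-296: a kernel-allocated (is_mmap) ring is handed to io_kbuf_mark_free() so its memory can be freed later, after any concurrent mmap user is done; a pinned user ring unpins and frees its page array; a legacy group drains its list one buffer at a time up to nbufs. A condensed sketch; where the drained legacy buffers go (a per-ctx cache here) is an assumption:

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	if (bl->is_mapped) {
		/* entries between head and tail were still unconsumed */
		i = bl->buf_ring->tail - bl->head;
		if (bl->is_mmap) {
			/* kernel memory: defer the actual free */
			io_kbuf_mark_free(ctx, bl);
			bl->buf_ring = NULL;
			bl->is_mmap = 0;
		} else if (bl->buf_nr_pages) {
			int j;

			for (j = 0; j < bl->buf_nr_pages; j++)
				unpin_user_page(bl->buf_pages[j]);
			kvfree(bl->buf_pages);
			bl->buf_pages = NULL;
			bl->buf_nr_pages = 0;
		}
		/* make sure the group reads as empty afterwards */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_mapped = 0;
		return i;
	}

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache); /* assumed cache */
		if (++i == nbufs)
			break;
	}
	return i;
}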
308 struct io_buffer_list *bl; in io_destroy_buffers() local
320 xa_for_each(&ctx->io_bl_xa, index, bl) { in io_destroy_buffers()
321 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
322 __io_remove_buffers(ctx, bl, -1U); in io_destroy_buffers()
323 kfree_rcu(bl, rcu); in io_destroy_buffers()
363 struct io_buffer_list *bl; in io_remove_buffers() local
369 bl = io_buffer_get_list(ctx, p->bgid); in io_remove_buffers()
370 if (bl) { in io_remove_buffers()
373 if (!bl->is_mapped) in io_remove_buffers()
374 ret = __io_remove_buffers(ctx, bl, p->nbufs); in io_remove_buffers()
467 struct io_buffer_list *bl) in io_add_buffers() argument
479 list_move_tail(&buf->list, &bl->buf_list); in io_add_buffers()
496 struct io_buffer_list *bl; in io_provide_buffers() local
507 bl = io_buffer_get_list(ctx, p->bgid); in io_provide_buffers()
508 if (unlikely(!bl)) { in io_provide_buffers()
509 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_provide_buffers()
510 if (!bl) { in io_provide_buffers()
514 INIT_LIST_HEAD(&bl->buf_list); in io_provide_buffers()
515 ret = io_buffer_add_list(ctx, bl, p->bgid); in io_provide_buffers()
524 kfree_rcu(bl, rcu); in io_provide_buffers()
531 if (bl->is_mapped) { in io_provide_buffers()
536 ret = io_add_buffers(ctx, p, bl); in io_provide_buffers()
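Lines 507-536 show a group being created lazily the first time buffers are provided to it: look it up, allocate and register on a miss, free with kfree_rcu() if registration fails (the xarray path can fail), and refuse to mix legacy buffers into a ring-mapped group. A sketch of that sequence; the error labels and return values are assumptions:

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/* RCU-free in case the xarray briefly saw it */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* legacy buffers cannot be added to a ring-mapped group */
	if (bl->is_mapped) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);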
547 struct io_buffer_list *bl) in io_pin_pbuf_ring() argument
584 bl->buf_pages = pages; in io_pin_pbuf_ring()
585 bl->buf_nr_pages = nr_pages; in io_pin_pbuf_ring()
586 bl->buf_ring = br; in io_pin_pbuf_ring()
587 bl->is_mapped = 1; in io_pin_pbuf_ring()
588 bl->is_mmap = 0; in io_pin_pbuf_ring()
627 struct io_buffer_list *bl) in io_alloc_pbuf_ring() argument
653 bl->buf_ring = ibf->mem; in io_alloc_pbuf_ring()
654 bl->is_mapped = 1; in io_alloc_pbuf_ring()
655 bl->is_mmap = 1; in io_alloc_pbuf_ring()
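Both setup paths converge on the same pair of flags: is_mapped marks the group as ring-backed, while is_mmap distinguishes kernel-allocated memory (handed to userspace via mmap) from user memory pinned into buf_pages. The tail ends of the two helpers, per lines 584-588 and 653-655:

	/* io_pin_pbuf_ring(): user memory, pinned page by page */
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_mapped = 1;
	bl->is_mmap = 0;

	/* io_alloc_pbuf_ring(): kernel memory, mapped by userspace later */
	bl->buf_ring = ibf->mem;
	bl->is_mapped = 1;
	bl->is_mmap = 1;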
662 struct io_buffer_list *bl, *free_bl = NULL; in io_register_pbuf_ring() local
697 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
698 if (bl) { in io_register_pbuf_ring()
700 if (bl->is_mapped || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
703 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); in io_register_pbuf_ring()
704 if (!bl) in io_register_pbuf_ring()
709 ret = io_pin_pbuf_ring(&reg, bl); in io_register_pbuf_ring()
711 ret = io_alloc_pbuf_ring(ctx, &reg, bl); in io_register_pbuf_ring()
714 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
715 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
717 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
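Registration ties the pieces together: reject a bgid already in use (either ring-mapped or holding legacy buffers), allocate a list head on a miss, set the ring up via pin or alloc depending on the IOU_PBUF_RING_MMAP flag, then record the power-of-two geometry and publish the list. A sketch of the core; the specific error values are assumptions consistent with the io_uring API:

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* group exists: must be idle and not already ring-mapped */
		if (bl->is_mapped || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1; /* ring_entries is a power of two */

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;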
728 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
739 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
740 if (!bl) in io_unregister_pbuf_ring()
742 if (!bl->is_mapped) in io_unregister_pbuf_ring()
745 __io_remove_buffers(ctx, bl, -1U); in io_unregister_pbuf_ring()
746 if (bl->bgid >= BGID_ARRAY) { in io_unregister_pbuf_ring()
747 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
748 kfree_rcu(bl, rcu); in io_unregister_pbuf_ring()
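Unregistration mirrors that: tear the ring down with __io_remove_buffers(), then free the list head, but only for dynamically added groups; slots below BGID_ARRAY belong to the fixed array and stay in place. A sketch, with assumed error values:

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;	/* not a ring-mapped group */

	__io_remove_buffers(ctx, bl, -1U);
	if (bl->bgid >= BGID_ARRAY) {
		/* dynamically added: drop from the xarray, free via RCU */
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		kfree_rcu(bl, rcu);
	}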
756 struct io_buffer_list *bl; in io_register_pbuf_status() local
766 bl = io_buffer_get_list(ctx, buf_status.buf_group); in io_register_pbuf_status()
767 if (!bl) in io_register_pbuf_status()
769 if (!bl->is_mapped) in io_register_pbuf_status()
772 buf_status.head = bl->head; in io_register_pbuf_status()
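The status query simply reports the kernel-side consumer head for a ring-mapped group, so userspace can tell how far consumption has advanced. Per lines 766-772, with assumed error values:

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!bl->is_mapped)
		return -EINVAL;

	/* report the kernel's consumer head back to userspace */
	buf_status.head = bl->head;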
781 struct io_buffer_list *bl; in io_pbuf_get_address() local
783 bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid); in io_pbuf_get_address()
785 if (!bl || !bl->is_mmap) in io_pbuf_get_address()
792 if (!smp_load_acquire(&bl->is_ready)) in io_pbuf_get_address()
795 return bl->buf_ring; in io_pbuf_get_address()
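This is the mmap-side consumer of the publication protocol above: the array pointer is read with smp_load_acquire() (pairing with the release at line 234) and the per-list is_ready flag with another acquire (pairing with line 70), so a concurrent registration is either fully visible or not at all. The function as the fragments suggest it:

void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
{
	struct io_buffer_list *bl;

	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);

	/* only kernel-allocated rings can be mmap'ed */
	if (!bl || !bl->is_mmap)
		return NULL;

	/* pairs with the smp_store_release() in io_buffer_add_list() */
	if (!smp_load_acquire(&bl->is_ready))
		return NULL;

	return bl->buf_ring;
}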