Lines matching "i-cache-block-size" (excerpts from block/bio.c)
1 // SPDX-License-Identifier: GPL-2.0
20 #include <linux/blk-crypto.h>
23 #include <trace/events/block.h>
25 #include "blk-rq-qos.h"
26 #include "blk-cgroup.h"
43 { .nr_vecs = 16, .name = "biovec-16" },
44 { .nr_vecs = 64, .name = "biovec-64" },
45 { .nr_vecs = 128, .name = "biovec-128" },
46 { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
86 static struct bio_slab *create_bio_slab(unsigned int size) in create_bio_slab() argument
93 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); in create_bio_slab()
94 bslab->slab = kmem_cache_create(bslab->name, size, in create_bio_slab()
97 if (!bslab->slab) in create_bio_slab()
100 bslab->slab_ref = 1; in create_bio_slab()
101 bslab->slab_size = size; in create_bio_slab()
103 if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) in create_bio_slab()
106 kmem_cache_destroy(bslab->slab); in create_bio_slab()
115 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
120 unsigned int size = bs_bio_slab_size(bs); in bio_find_or_create_slab() local
124 bslab = xa_load(&bio_slabs, size); in bio_find_or_create_slab()
126 bslab->slab_ref++; in bio_find_or_create_slab()
128 bslab = create_bio_slab(size); in bio_find_or_create_slab()
132 return bslab->slab; in bio_find_or_create_slab()
147 WARN_ON_ONCE(bslab->slab != bs->bio_slab); in bio_put_slab()
149 WARN_ON(!bslab->slab_ref); in bio_put_slab()
151 if (--bslab->slab_ref) in bio_put_slab()
156 kmem_cache_destroy(bslab->slab); in bio_put_slab()
170 kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); in bvec_free()
195 *nr_vecs = bvs->nr_vecs; in bvec_alloc()
205 bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask)); in bvec_alloc()
217 if (bio->bi_blkg) { in bio_uninit()
218 blkg_put(bio->bi_blkg); in bio_uninit()
219 bio->bi_blkg = NULL; in bio_uninit()
231 struct bio_set *bs = bio->bi_pool; in bio_free()
237 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
238 mempool_free(p - bs->front_pad, &bs->bio_pool); in bio_free()
249 bio->bi_next = NULL; in bio_init()
250 bio->bi_bdev = bdev; in bio_init()
251 bio->bi_opf = opf; in bio_init()
252 bio->bi_flags = 0; in bio_init()
253 bio->bi_ioprio = 0; in bio_init()
254 bio->bi_status = 0; in bio_init()
255 bio->bi_iter.bi_sector = 0; in bio_init()
256 bio->bi_iter.bi_size = 0; in bio_init()
257 bio->bi_iter.bi_idx = 0; in bio_init()
258 bio->bi_iter.bi_bvec_done = 0; in bio_init()
259 bio->bi_end_io = NULL; in bio_init()
260 bio->bi_private = NULL; in bio_init()
262 bio->bi_blkg = NULL; in bio_init()
263 bio->bi_issue.value = 0; in bio_init()
267 bio->bi_iocost_cost = 0; in bio_init()
271 bio->bi_crypt_context = NULL; in bio_init()
274 bio->bi_integrity = NULL; in bio_init()
276 bio->bi_vcnt = 0; in bio_init()
278 atomic_set(&bio->__bi_remaining, 1); in bio_init()
279 atomic_set(&bio->__bi_cnt, 1); in bio_init()
280 bio->bi_cookie = BLK_QC_T_NONE; in bio_init()
282 bio->bi_max_vecs = max_vecs; in bio_init()
283 bio->bi_io_vec = table; in bio_init()
284 bio->bi_pool = NULL; in bio_init()
289 * bio_reset - reinitialize a bio
291 * @bdev: block device to use the bio for
296  *   allocated bio returned by bio_alloc_bioset() - the only fields that are		in bio_reset()
304 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
305 bio->bi_bdev = bdev; in bio_reset()
306 if (bio->bi_bdev) in bio_reset()
308 bio->bi_opf = opf; in bio_reset()
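
A hedged usage sketch (bdev, page and sector are assumed caller-provided kernel context, with <linux/bio.h> in scope): because bio_reset() preserves the vector table (bi_io_vec/bi_max_vecs), one embedded or on-stack bio can be reused across several synchronous attempts.

        struct bio_vec bvec;
        struct bio bio;
        int attempt, ret = -EIO;

        bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
        for (attempt = 0; attempt < 2; attempt++) {
                /* wipe state from the previous round, keep the bvec table */
                bio_reset(&bio, bdev, REQ_OP_READ | REQ_SYNC);
                bio.bi_iter.bi_sector = sector;
                __bio_add_page(&bio, page, PAGE_SIZE, 0);
                ret = submit_bio_wait(&bio);
                if (!ret)
                        break;
        }
        bio_uninit(&bio);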
314 struct bio *parent = bio->bi_private; in __bio_chain_endio()
316 if (bio->bi_status && !parent->bi_status) in __bio_chain_endio()
317 parent->bi_status = bio->bi_status; in __bio_chain_endio()
328 * bio_chain - chain bio completions
332 * The caller won't have a bi_end_io called when @bio completes - instead,
340 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
342 bio->bi_private = parent; in bio_chain()
343 bio->bi_end_io = bio_chain_endio; in bio_chain()
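
A hedged sketch of the chaining pattern used by the blkdev_issue_*() helpers (the loop condition and the sector/len bookkeeping are illustrative): each piece is submitted as it is built, and only the last bio in the chain is waited on, so it completes only after every chained predecessor has finished.

        struct bio *prev = NULL, *bio;
        int ret;

        while (nr_sects) {                              /* illustrative loop */
                bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, GFP_KERNEL);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = len;             /* bytes in this piece */
                if (prev) {
                        bio_chain(prev, bio);           /* @bio becomes prev's parent */
                        submit_bio(prev);
                }
                prev = bio;
                /* advance sector, len, nr_sects ... */
        }
        ret = submit_bio_wait(prev);                    /* waits for the whole chain */
        bio_put(prev);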
368 spin_lock(&bs->rescue_lock); in bio_alloc_rescue()
369 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
370 spin_unlock(&bs->rescue_lock); in bio_alloc_rescue()
384 if (WARN_ON_ONCE(!bs->rescue_workqueue)) in punt_bios_to_rescuer()
400 	while ((bio = bio_list_pop(&current->bio_list[0])))		in punt_bios_to_rescuer()
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
402 current->bio_list[0] = nopunt; in punt_bios_to_rescuer()
405 	while ((bio = bio_list_pop(&current->bio_list[1])))		in punt_bios_to_rescuer()
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
407 current->bio_list[1] = nopunt; in punt_bios_to_rescuer()
409 spin_lock(&bs->rescue_lock); in punt_bios_to_rescuer()
410 bio_list_merge(&bs->rescue_list, &punt); in punt_bios_to_rescuer()
411 spin_unlock(&bs->rescue_lock); in punt_bios_to_rescuer()
413 queue_work(bs->rescue_workqueue, &bs->rescue_work); in punt_bios_to_rescuer()
416 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache) in bio_alloc_irq_cache_splice() argument
420 /* cache->free_list must be empty */ in bio_alloc_irq_cache_splice()
421 if (WARN_ON_ONCE(cache->free_list)) in bio_alloc_irq_cache_splice()
425 cache->free_list = cache->free_list_irq; in bio_alloc_irq_cache_splice()
426 cache->free_list_irq = NULL; in bio_alloc_irq_cache_splice()
427 cache->nr += cache->nr_irq; in bio_alloc_irq_cache_splice()
428 cache->nr_irq = 0; in bio_alloc_irq_cache_splice()
436 struct bio_alloc_cache *cache; in bio_alloc_percpu_cache() local
439 cache = per_cpu_ptr(bs->cache, get_cpu()); in bio_alloc_percpu_cache()
440 if (!cache->free_list) { in bio_alloc_percpu_cache()
441 if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) in bio_alloc_percpu_cache()
442 bio_alloc_irq_cache_splice(cache); in bio_alloc_percpu_cache()
443 if (!cache->free_list) { in bio_alloc_percpu_cache()
448 bio = cache->free_list; in bio_alloc_percpu_cache()
449 cache->free_list = bio->bi_next; in bio_alloc_percpu_cache()
450 cache->nr--; in bio_alloc_percpu_cache()
453 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); in bio_alloc_percpu_cache()
454 bio->bi_pool = bs; in bio_alloc_percpu_cache()
459 * bio_alloc_bioset - allocate a bio for I/O
460 * @bdev: block device to allocate the bio for (can be %NULL)
461 * @nr_vecs: number of bvecs to pre-allocate
475 * Note that when running under submit_bio_noacct() (i.e. any block driver),
476 * bios are not submitted until after you return - see the code in
487 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
501 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) in bio_alloc_bioset()
505 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { in bio_alloc_bioset()
512 	 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.		in bio_alloc_bioset()
526 * If we were to allocate multiple bios (say a stacking block driver in bio_alloc_bioset()
532 * current->bio_list, we first try the allocation without in bio_alloc_bioset()
537 if (current->bio_list && in bio_alloc_bioset()
538 	    (!bio_list_empty(&current->bio_list[0]) ||		in bio_alloc_bioset()
539 	     !bio_list_empty(&current->bio_list[1])) &&		in bio_alloc_bioset()
540 bs->rescue_workqueue) in bio_alloc_bioset()
543 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
547 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
551 if (!mempool_is_saturated(&bs->bio_pool)) in bio_alloc_bioset()
554 bio = p + bs->front_pad; in bio_alloc_bioset()
558 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
562 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
569 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); in bio_alloc_bioset()
574 bio->bi_pool = bs; in bio_alloc_bioset()
578 mempool_free(p, &bs->bio_pool); in bio_alloc_bioset()
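
A hedged allocation sketch, assuming a driver-private bio_set named my_bio_set (see the bioset_init() example near the end of this listing) and an illustrative completion handler my_write_endio; with __GFP_DIRECT_RECLAIM set, the mempool guarantees the allocation eventually succeeds.

        struct bio *bio;

        bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO, &my_bio_set);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = my_write_endio;        /* illustrative completion handler */
        bio->bi_private = ctx;
        /* ... bio_add_page() the payload, then ... */
        submit_bio(bio);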
584 * bio_kmalloc - kmalloc a bio
595 * for allocations in the file system I/O path.
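
A hedged sketch of the bio_kmalloc() contract: no mempool stands behind it, so the result must be checked, initialized with bio_init(), and released with bio_uninit() followed by kfree().

        struct bio *bio;

        bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
        if (!bio)
                return -ENOMEM;
        bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
        /* ... add pages, submit, wait ... */
        bio_uninit(bio);
        kfree(bio);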
620  * bio_truncate - truncate the bio to the smaller size @new_size
622 * @new_size: new size for truncating the bio
625  *   Truncate the bio to the new size @new_size. If bio_op(bio) is
636 if (new_size >= bio->bi_iter.bi_size) in bio_truncate()
647 offset = new_size - done; in bio_truncate()
651 bv.bv_len - offset); in bio_truncate()
666 bio->bi_iter.bi_size = new_size; in bio_truncate()
670 * guard_bio_eod - truncate a BIO to fit the block device
674 * block size is some multiple of the physical sector size.
676 * We'll just truncate the bio to the size of the device, and clear the end of
677 * the buffer head manually. Truly out-of-range accesses will turn into actual
678  * I/O errors; this only handles the "we need to be able to do I/O at the final
683 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); in guard_bio_eod()
693 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
696 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
697 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod()
703 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache, in __bio_alloc_cache_prune() argument
706 unsigned int i = 0; in __bio_alloc_cache_prune() local
709 while ((bio = cache->free_list) != NULL) { in __bio_alloc_cache_prune()
710 cache->free_list = bio->bi_next; in __bio_alloc_cache_prune()
711 cache->nr--; in __bio_alloc_cache_prune()
713 if (++i == nr) in __bio_alloc_cache_prune()
716 return i; in __bio_alloc_cache_prune()
719 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, in bio_alloc_cache_prune() argument
722 nr -= __bio_alloc_cache_prune(cache, nr); in bio_alloc_cache_prune()
723 if (!READ_ONCE(cache->free_list)) { in bio_alloc_cache_prune()
724 bio_alloc_irq_cache_splice(cache); in bio_alloc_cache_prune()
725 __bio_alloc_cache_prune(cache, nr); in bio_alloc_cache_prune()
734 if (bs->cache) { in bio_cpu_dead()
735 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); in bio_cpu_dead() local
737 bio_alloc_cache_prune(cache, -1U); in bio_cpu_dead()
746 if (!bs->cache) in bio_alloc_cache_destroy()
749 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bio_alloc_cache_destroy()
751 struct bio_alloc_cache *cache; in bio_alloc_cache_destroy() local
753 cache = per_cpu_ptr(bs->cache, cpu); in bio_alloc_cache_destroy()
754 bio_alloc_cache_prune(cache, -1U); in bio_alloc_cache_destroy()
756 free_percpu(bs->cache); in bio_alloc_cache_destroy()
757 bs->cache = NULL; in bio_alloc_cache_destroy()
762 struct bio_alloc_cache *cache; in bio_put_percpu_cache() local
764 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); in bio_put_percpu_cache()
765 if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) { in bio_put_percpu_cache()
773 if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) { in bio_put_percpu_cache()
774 bio->bi_next = cache->free_list; in bio_put_percpu_cache()
775 bio->bi_bdev = NULL; in bio_put_percpu_cache()
776 cache->free_list = bio; in bio_put_percpu_cache()
777 cache->nr++; in bio_put_percpu_cache()
782 bio->bi_next = cache->free_list_irq; in bio_put_percpu_cache()
783 cache->free_list_irq = bio; in bio_put_percpu_cache()
784 cache->nr_irq++; in bio_put_percpu_cache()
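
A hedged sketch of opting into this per-cpu cache (poll_bio_set is an illustrative name): the bio_set is created with BIOSET_PERCPU_CACHE and the allocation carries REQ_ALLOC_CACHE, typically together with REQ_POLLED as in polled io_uring-style I/O; bio_put() then recycles the bio through the cache instead of the mempool.

        bioset_init(&poll_bio_set, 64, 0,
                    BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);

        bio = bio_alloc_bioset(bdev, 1,
                               REQ_OP_READ | REQ_POLLED | REQ_ALLOC_CACHE,
                               GFP_KERNEL, &poll_bio_set);
        /* ... fill and submit ... */
        bio_put(bio);           /* returns to this CPU's free list when possible */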
791 * bio_put - release a reference to a bio
801 BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
802 if (!atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
805 if (bio->bi_opf & REQ_ALLOC_CACHE) in bio_put()
815 bio->bi_ioprio = bio_src->bi_ioprio; in __bio_clone()
816 bio->bi_iter = bio_src->bi_iter; in __bio_clone()
818 if (bio->bi_bdev) { in __bio_clone()
819 if (bio->bi_bdev == bio_src->bi_bdev && in __bio_clone()
826 return -ENOMEM; in __bio_clone()
829 return -ENOMEM; in __bio_clone()
834 * bio_alloc_clone - clone a bio that shares the original bio's biovec
850 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
858 bio->bi_io_vec = bio_src->bi_io_vec; in bio_alloc_clone()
865 * bio_init_clone - clone a bio that shares the original bio's biovec
881 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); in bio_init_clone()
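
A hedged sketch of the stacking-driver pattern these clone helpers serve (my_clone_set, my_clone_endio and the remapping are illustrative): clone the incoming bio, point the clone at the lower device, and complete the original from the clone's endio.

        static void my_clone_endio(struct bio *clone)
        {
                struct bio *orig = clone->bi_private;

                orig->bi_status = clone->bi_status;
                bio_put(clone);
                bio_endio(orig);
        }

        /* in the submission path */
        clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO, &my_clone_set);
        clone->bi_iter.bi_sector = remapped_sector;
        clone->bi_private = bio;
        clone->bi_end_io = my_clone_endio;
        submit_bio_noacct(clone);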
890 * bio_full - check if the bio is full
899 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_full()
901 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_full()
909 size_t bv_end = bv->bv_offset + bv->bv_len; in bvec_try_merge_page()
910 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; in bvec_try_merge_page()
917 if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) in bvec_try_merge_page()
924 if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) in bvec_try_merge_page()
928 bv->bv_len += len; in bvec_try_merge_page()
934 * size limit. This is not for normal read/write bios, but for passthrough
942 phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; in bvec_try_merge_hw_page()
943 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; in bvec_try_merge_hw_page()
947 if (len > queue_max_segment_size(q) - bv->bv_len) in bvec_try_merge_hw_page()
953 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
975 if (len > max_size - bio->bi_iter.bi_size) in bio_add_hw_page()
978 if (bio->bi_vcnt > 0) { in bio_add_hw_page()
979 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_hw_page()
983 bio->bi_iter.bi_size += len; in bio_add_hw_page()
987 if (bio->bi_vcnt >= in bio_add_hw_page()
988 min(bio->bi_max_vecs, queue_max_segments(q))) in bio_add_hw_page()
995 if (bvec_gap_to_prev(&q->limits, bv, offset)) in bio_add_hw_page()
999 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); in bio_add_hw_page()
1000 bio->bi_vcnt++; in bio_add_hw_page()
1001 bio->bi_iter.bi_size += len; in bio_add_hw_page()
1006 * bio_add_pc_page - attempt to add page to passthrough bio
1014 * number of reasons, such as the bio being full or target block device
1015 * limitations. The target block device must allow bio's up to PAGE_SIZE,
1030 * bio_add_zone_append_page - attempt to add page to zone-append bio
1037 * for a zone-append request. This can fail for a number of reasons, such as the
1038 * bio being full or the target block device is not a zoned block device or
1039 * other limitations of the target block device. The target block device must
1048 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_add_zone_append_page()
1054 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) in bio_add_zone_append_page()
1063 * __bio_add_page - add page(s) to a bio in a new segment
1078 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); in __bio_add_page()
1079 bio->bi_iter.bi_size += len; in __bio_add_page()
1080 bio->bi_vcnt++; in __bio_add_page()
1085 * bio_add_page - attempt to add page(s) to bio
1092 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1101 if (bio->bi_iter.bi_size > UINT_MAX - len) in bio_add_page()
1104 if (bio->bi_vcnt > 0 && in bio_add_page()
1105 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], in bio_add_page()
1107 bio->bi_iter.bi_size += len; in bio_add_page()
1111 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_add_page()
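
A hedged sketch of filling a bio page by page: bio_add_page() returns the number of bytes actually added, so a short return means the bio is full (or cloned) and the caller should submit what it has and continue with a fresh bio.

        struct bio *bio;
        int i;

        bio = bio_alloc(bdev, nr_pages, REQ_OP_WRITE, GFP_NOIO);
        bio->bi_iter.bi_sector = sector;
        for (i = 0; i < nr_pages; i++) {
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
                        /* full: submit and restart with a new bio (not shown) */
                        break;
                }
        }
        bio->bi_end_io = my_write_endio;        /* illustrative */
        submit_bio(bio);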
1123 __bio_add_page(bio, &folio->page, len, off); in bio_add_folio_nofail()
1127 * bio_add_folio - Attempt to add part of a folio to a bio.
1145 return bio_add_page(bio, &folio->page, len, off) > 0; in bio_add_folio()
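
A hedged folio-based sketch: unlike bio_add_page(), bio_add_folio() reports success as a bool, and bio_add_folio_nofail() can be used where the caller has already guaranteed there is room.

        if (!bio_add_folio(bio, folio, folio_size(folio), 0)) {
                /* bio is full or cloned: submit it and start a fresh one */
                submit_bio(bio);
                bio = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOFS);
                bio->bi_iter.bi_sector = next_sector;   /* illustrative */
                bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
        }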
1173 size_t size = iov_iter_count(iter); in bio_iov_bvec_set() local
1175 WARN_ON_ONCE(bio->bi_max_vecs); in bio_iov_bvec_set()
1178 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_bvec_set()
1181 size = min(size, max_sectors << SECTOR_SHIFT); in bio_iov_bvec_set()
1184 bio->bi_vcnt = iter->nr_segs; in bio_iov_bvec_set()
1185 bio->bi_io_vec = (struct bio_vec *)iter->bvec; in bio_iov_bvec_set()
1186 bio->bi_iter.bi_bvec_done = iter->iov_offset; in bio_iov_bvec_set()
1187 bio->bi_iter.bi_size = size; in bio_iov_bvec_set()
1196 if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) in bio_iov_add_page()
1197 return -EIO; in bio_iov_add_page()
1199 if (bio->bi_vcnt > 0 && in bio_iov_add_page()
1200 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], in bio_iov_add_page()
1202 bio->bi_iter.bi_size += len; in bio_iov_add_page()
1214 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_iov_add_zone_append_page()
1219 return -EINVAL; in bio_iov_add_zone_append_page()
1228 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1234 * For a multi-segment *iter, this function only adds pages from the next
1235 * non-empty segment of the iov iterator.
1240 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1241 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; in __bio_iov_iter_get_pages()
1242 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; in __bio_iov_iter_get_pages()
1244 ssize_t size, left; in __bio_iov_iter_get_pages() local
1245 unsigned len, i = 0; in __bio_iov_iter_get_pages() local
1255 pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); in __bio_iov_iter_get_pages()
1257 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) in __bio_iov_iter_get_pages()
1261 * Each segment in the iov is required to be a block size multiple. in __bio_iov_iter_get_pages()
1264 * result to ensure the bio's total size is correct. The remainder of in __bio_iov_iter_get_pages()
1267 size = iov_iter_extract_pages(iter, &pages, in __bio_iov_iter_get_pages()
1268 UINT_MAX - bio->bi_iter.bi_size, in __bio_iov_iter_get_pages()
1270 if (unlikely(size <= 0)) in __bio_iov_iter_get_pages()
1271 return size ? size : -EFAULT; in __bio_iov_iter_get_pages()
1273 nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE); in __bio_iov_iter_get_pages()
1275 if (bio->bi_bdev) { in __bio_iov_iter_get_pages()
1276 size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); in __bio_iov_iter_get_pages()
1278 size -= trim; in __bio_iov_iter_get_pages()
1281 if (unlikely(!size)) { in __bio_iov_iter_get_pages()
1282 ret = -EFAULT; in __bio_iov_iter_get_pages()
1286 for (left = size, i = 0; left > 0; left -= len, i++) { in __bio_iov_iter_get_pages()
1287 struct page *page = pages[i]; in __bio_iov_iter_get_pages()
1289 len = min_t(size_t, PAGE_SIZE - offset, left); in __bio_iov_iter_get_pages()
1303 while (i < nr_pages) in __bio_iov_iter_get_pages()
1304 bio_release_page(bio, pages[i++]); in __bio_iov_iter_get_pages()
1310 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1319 * to ensure the bvecs and pages stay referenced until the submitted I/O is
1320 * completed by a call to ->ki_complete() or returns with an error other than
1321 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1334 return -EIO; in bio_iov_iter_get_pages()
1338 iov_iter_advance(iter, bio->bi_iter.bi_size); in bio_iov_iter_get_pages()
1348 return bio->bi_vcnt ? 0 : ret; in bio_iov_iter_get_pages()
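
A hedged sketch of a direct-I/O style submission path (pos, iter, dio and my_dio_endio are assumed caller state): the iterator's pages are pinned into the bio, and the completion handler is responsible for dropping them again, e.g. via bio_release_pages().

        struct bio *bio;
        int ret;

        bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
                        REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;

        ret = bio_iov_iter_get_pages(bio, iter);
        if (ret) {
                bio_put(bio);
                return ret;
        }
        bio->bi_end_io = my_dio_endio;  /* must eventually release the pages */
        bio->bi_private = dio;
        submit_bio(bio);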
1354 complete(bio->bi_private); in submit_bio_wait_endio()
1358 * submit_bio_wait - submit a bio, and wait until it completes
1359 * @bio: The &struct bio which describes the I/O
1371 bio->bi_bdev->bd_disk->lockdep_map); in submit_bio_wait()
1374 bio->bi_private = &done; in submit_bio_wait()
1375 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
1376 bio->bi_opf |= REQ_SYNC; in submit_bio_wait()
1379 /* Prevent hang_check timer from firing at us during very long I/O */ in submit_bio_wait()
1388 return blk_status_to_errno(bio->bi_status); in submit_bio_wait()
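
A hedged sketch of a synchronous single-page read; submit_bio_wait() returns a negative errno derived from bi_status.

        struct bio *bio;
        int ret;

        bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);
        ret = submit_bio_wait(bio);
        bio_put(bio);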
1398 bio_advance_iter(bio, &bio->bi_iter, bytes); in __bio_advance()
1405 while (src_iter->bi_size && dst_iter->bi_size) { in bio_copy_data_iter()
1424 * bio_copy_data - copy contents of data buffers from one bio to another
1428 * Stops when it reaches the end of either @src or @dst - that is, copies
1429 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1433 struct bvec_iter src_iter = src->bi_iter; in bio_copy_data()
1434 struct bvec_iter dst_iter = dst->bi_iter; in bio_copy_data()
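
A hedged sketch of the bounce-buffer style use this helper supports (bounce is an illustrative, separately allocated bio with its own pages): for a WRITE the payload is copied in before submission, and for a READ it would be copied back the other way from the completion handler.

        if (bio_data_dir(orig) == WRITE)
                bio_copy_data(bounce, orig);    /* copies min(src, dst) bytes */
        submit_bio(bounce);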
1446 __free_page(bvec->bv_page); in bio_free_pages()
1452 * for performing direct-IO in BIOs.
1455 * because the required locks are not interrupt-safe. So what we can do is to
1461 * direct-io pins the pages with get_user_pages(). This makes
1488 * have been written out during the direct-IO read. So we take another ref on
1489 * the BIO and re-dirty the pages in process context.
1515 next = bio->bi_private; in bio_dirty_fn()
1537 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
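
A hedged sketch of the pattern this machinery supports, loosely following the block layer's direct-I/O code: user pages backing a READ are marked dirty before submission, and bio_check_pages_dirty() at completion re-dirties and releases them safely even when called from interrupt context (it also drops the bio reference in the common case).

        /* before submission */
        if (bio_op(bio) == REQ_OP_READ && user_backed_iter(iter))
                bio_set_pages_dirty(bio);
        submit_bio(bio);

        /* in the completion handler */
        if (should_dirty) {
                bio_check_pages_dirty(bio);     /* releases pages and the bio */
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }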
1547 * If we're not chaining, then ->__bi_remaining is always 1 and in bio_remaining_done()
1553 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1555 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1564 * bio_endio - end I/O on a bio
1568 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1569 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1573 * using bio_chain(). The ->bi_end_io() function will only be called the
1586 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { in bio_endio()
1587 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); in bio_endio()
1593 * various corner cases will break (like stacking block devices that in bio_endio()
1594 * save/restore bi_end_io) - however, we want to avoid unbounded in bio_endio()
1599 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1607 if (bio->bi_end_io) in bio_endio()
1608 bio->bi_end_io(bio); in bio_endio()
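
A hedged sketch of the driver side of completion (mydrv_finish is an illustrative helper): record any error in bi_status and call bio_endio(); for chained or split bios the owner's ->bi_end_io only runs once every piece has completed.

        static void mydrv_finish(struct bio *bio, int error)
        {
                if (error)
                        bio->bi_status = errno_to_blk_status(error);
                bio_endio(bio);
        }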
1613 * bio_split - split a bio
1638 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
1642 split->bi_iter.bi_size = sectors << 9; in bio_split()
1647 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
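
A hedged sketch of the usual split-and-resubmit pattern (max_sectors and my_bio_set are illustrative): carve off the front piece the driver can handle now, chain it so the original only completes when both halves are done, and push the remainder back to the block layer.

        if (bio_sectors(bio) > max_sectors) {
                struct bio *split;

                split = bio_split(bio, max_sectors, GFP_NOIO, &my_bio_set);
                /* error handling for a failed clone omitted */
                bio_chain(split, bio);
                submit_bio_noacct(bio);         /* remainder goes around again */
                bio = split;                    /* continue with the front piece */
        }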
1657 * bio_trim - trim a bio
1660 * @size: size we want to trim @bio to, in sectors
1665 void bio_trim(struct bio *bio, sector_t offset, sector_t size) in bio_trim() argument
1667 if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || in bio_trim()
1668 offset + size > bio_sectors(bio))) in bio_trim()
1671 size <<= 9; in bio_trim()
1672 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1676 bio->bi_iter.bi_size = size; in bio_trim()
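
A hedged sketch of narrowing a clone to a sub-range, the way stacking targets remap a slice of the original request; offset and size are in 512-byte sectors, and the names are illustrative.

        clone = bio_alloc_clone(bdev, bio, GFP_NOIO, &my_bio_set);
        bio_trim(clone, offset_sectors, nr_sectors);
        clone->bi_iter.bi_sector = target_sector;       /* illustrative remap */
        submit_bio_noacct(clone);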
1689 struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; in biovec_init_pool()
1691 return mempool_init_slab_pool(pool, pool_entries, bp->slab); in biovec_init_pool()
1695 * bioset_exit - exit a bioset initialized with bioset_init()
1697 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1703 if (bs->rescue_workqueue) in bioset_exit()
1704 destroy_workqueue(bs->rescue_workqueue); in bioset_exit()
1705 bs->rescue_workqueue = NULL; in bioset_exit()
1707 mempool_exit(&bs->bio_pool); in bioset_exit()
1708 mempool_exit(&bs->bvec_pool); in bioset_exit()
1711 if (bs->bio_slab) in bioset_exit()
1713 bs->bio_slab = NULL; in bioset_exit()
1718 * bioset_init - Initialize a bio_set
1720 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1743 bs->front_pad = front_pad; in bioset_init()
1745 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); in bioset_init()
1747 bs->back_pad = 0; in bioset_init()
1749 spin_lock_init(&bs->rescue_lock); in bioset_init()
1750 bio_list_init(&bs->rescue_list); in bioset_init()
1751 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); in bioset_init()
1753 bs->bio_slab = bio_find_or_create_slab(bs); in bioset_init()
1754 if (!bs->bio_slab) in bioset_init()
1755 return -ENOMEM; in bioset_init()
1757 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) in bioset_init()
1761 biovec_init_pool(&bs->bvec_pool, pool_size)) in bioset_init()
1765 bs->rescue_workqueue = alloc_workqueue("bioset", in bioset_init()
1767 if (!bs->rescue_workqueue) in bioset_init()
1771 bs->cache = alloc_percpu(struct bio_alloc_cache); in bioset_init()
1772 if (!bs->cache) in bioset_init()
1774 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bioset_init()
1780 return -ENOMEM; in bioset_init()
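
A hedged sketch of the bio_set lifecycle for a driver that clones bios in its I/O path (the mydrv_* names are illustrative): BIOSET_NEED_BVECS also sets up the bvec mempool, while BIOSET_NEED_RESCUER is only needed by drivers that rely on the punt-to-workqueue deadlock avoidance shown earlier in this listing.

        static struct bio_set my_bio_set;

        static int __init mydrv_init(void)
        {
                return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
                                   BIOSET_NEED_BVECS);
        }

        static void __exit mydrv_exit(void)
        {
                bioset_exit(&my_bio_set);
        }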
1786 int i; in init_bio() local
1792 for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { in init_bio()
1793 struct biovec_slab *bvs = bvec_slabs + i; in init_bio()
1795 bvs->slab = kmem_cache_create(bvs->name, in init_bio()
1796 bvs->nr_vecs * sizeof(struct bio_vec), 0, in init_bio()
1800 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, in init_bio()