xref: /linux/fs/iomap/bio.c (revision d1384f70b2e3162786bc73b8f86c27417803bd57)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Red Hat, Inc.
4  * Copyright (C) 2016-2023 Christoph Hellwig.
5  */
6 #include <linux/iomap.h>
7 #include <linux/pagemap.h>
8 #include "internal.h"
9 #include "trace.h"
10 
/* Protects failed_read_list; taken from bio end_io (irq) context. */
static DEFINE_SPINLOCK(failed_read_lock);
/* Bios whose reads failed, queued for completion in process context. */
static struct bio_list failed_read_list = BIO_EMPTY_LIST;
13 
/*
 * Complete all folio reads covered by @bio and release the bio.
 *
 * The block layer status is translated to a negative errno and reported to
 * iomap for every folio segment in the bio.  Consumes the caller's bio
 * reference; @bio must not be touched after this returns.
 */
static void __iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}
23 
24 static void
iomap_fail_reads(struct work_struct * work)25 iomap_fail_reads(
26 	struct work_struct	*work)
27 {
28 	struct bio		*bio;
29 	struct bio_list		tmp = BIO_EMPTY_LIST;
30 	unsigned long		flags;
31 
32 	spin_lock_irqsave(&failed_read_lock, flags);
33 	bio_list_merge_init(&tmp, &failed_read_list);
34 	spin_unlock_irqrestore(&failed_read_lock, flags);
35 
36 	while ((bio = bio_list_pop(&tmp)) != NULL) {
37 		__iomap_read_end_io(bio);
38 		cond_resched();
39 	}
40 }
41 
/* Deferred-completion work item; drains failed_read_list. */
static DECLARE_WORK(failed_read_work, iomap_fail_reads);
43 
/*
 * Defer completion of a failed buffered read bio to process context.
 *
 * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
 * in the fserror code.  The caller no longer owns the bio reference
 * after the spinlock drops.
 */
static void iomap_fail_buffered_read(struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&failed_read_lock, flags);
	/*
	 * Only kick the worker on an empty-to-non-empty transition: a work
	 * item that is already queued (or running but yet to take the lock)
	 * will pick up this bio as well.
	 */
	if (bio_list_empty(&failed_read_list))
		WARN_ON_ONCE(!schedule_work(&failed_read_work));
	bio_list_add(&failed_read_list, bio);
	spin_unlock_irqrestore(&failed_read_lock, flags);
}
59 
iomap_read_end_io(struct bio * bio)60 static void iomap_read_end_io(struct bio *bio)
61 {
62 	if (bio->bi_status) {
63 		iomap_fail_buffered_read(bio);
64 		return;
65 	}
66 
67 	__iomap_read_end_io(bio);
68 }
69 
iomap_bio_submit_read(struct iomap_read_folio_ctx * ctx)70 static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
71 {
72 	struct bio *bio = ctx->read_ctx;
73 
74 	if (bio)
75 		submit_bio(bio);
76 }
77 
/*
 * Queue a read of @plen bytes at the iterator's current position into the
 * folio cached in @ctx.
 *
 * The range is appended to the bio under construction in ctx->read_ctx
 * when possible; otherwise (no bio yet, the range is not physically
 * contiguous with the bio's end, or the bio is full) the current bio is
 * submitted and a fresh one allocated.  Always returns 0.
 */
static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	size_t poff = offset_in_folio(folio, pos);
	loff_t length = iomap_length(iter);
	sector_t sector;
	struct bio *bio = ctx->read_ctx;

	sector = iomap_sector(iomap, pos);
	if (!bio || bio_end_sector(bio) != sector ||
	    !bio_add_folio(bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		/* Size the new bio for the whole remaining iteration range. */
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		/* Send off the old, non-mergeable bio before starting anew. */
		if (bio)
			submit_bio(bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
				     gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!bio)
			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
		/*
		 * NOTE(review): the retry's result is not checked; this relies
		 * on a single-vec GFP_KERNEL bio_alloc not failing (mempool
		 * backed) — confirm against bio_alloc semantics.
		 */
		if (ctx->rac)
			bio->bi_opf |= REQ_RAHEAD;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(bio, folio, plen, poff);
		ctx->read_ctx = bio;
	}
	return 0;
}
119 
/* Default buffered-read ops backed by the block layer (struct bio). */
const struct iomap_read_ops iomap_bio_read_ops = {
	.read_folio_range = iomap_bio_read_folio_range,
	.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
125 
/*
 * Synchronously read @len bytes at @pos into @folio from @iter's source
 * mapping.
 *
 * Uses an on-stack bio with a single bio_vec, so the range must fit in
 * one segment of the folio.  Returns 0 on success or a negative errno
 * from the block layer.
 */
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
	return submit_bio_wait(&bio);
}
138