// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/bio-integrity.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include "internal.h"
#include "trace.h"

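/*
 * Buffered read bios that completed with an error are queued on this list
 * and completed from process context by failed_read_work below.
 */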
static DEFINE_SPINLOCK(failed_read_lock);
static struct bio_list failed_read_list = BIO_EMPTY_LIST;

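/*
 * Complete all folios attached to @bio with @error, free any attached
 * integrity payload, and drop the bio reference.  Returns the number of
 * folios that were completed.
 */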
static u32 __iomap_read_end_io(struct bio *bio, int error)
{
	struct folio_iter fi;
	u32 folio_count = 0;

	bio_for_each_folio_all(fi, bio) {
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
		folio_count++;
	}
	if (bio_integrity(bio))
		fs_bio_integrity_free(bio);
	bio_put(bio);
	return folio_count;
}

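/*
 * Work handler that drains the list of failed read bios and completes
 * each one from process context with its saved status.
 */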
static void iomap_fail_reads(struct work_struct *work)
{
	struct bio *bio;
	struct bio_list tmp = BIO_EMPTY_LIST;
	unsigned long flags;

	spin_lock_irqsave(&failed_read_lock, flags);
	bio_list_merge_init(&tmp, &failed_read_list);
	spin_unlock_irqrestore(&failed_read_lock, flags);

	/* The list may be long, so give up the CPU between completions. */
	while ((bio = bio_list_pop(&tmp)) != NULL) {
		__iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
		cond_resched();
	}
}

static DECLARE_WORK(failed_read_work, iomap_fail_reads);

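/*
 * Queue a failed read bio for completion from process context.  This may
 * be called from bio completion (interrupt) context, hence the irqsave
 * locking.
 */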
static void iomap_fail_buffered_read(struct bio *bio)
{
	unsigned long flags;

	/*
	 * Bounce I/O errors to a workqueue to avoid nested i_lock
	 * acquisitions in the fserror code.  The caller no longer owns the
	 * bio reference after the spinlock drops.
	 */
	spin_lock_irqsave(&failed_read_lock, flags);
	if (bio_list_empty(&failed_read_list))
		WARN_ON_ONCE(!schedule_work(&failed_read_work));
	bio_list_add(&failed_read_list, bio);
	spin_unlock_irqrestore(&failed_read_lock, flags);
}

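/*
 * End I/O handler for buffered read bios.  Successful bios are completed
 * directly; failures are bounced to the failed read workqueue.
 */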
static void iomap_read_end_io(struct bio *bio)
{
	if (bio->bi_status) {
		iomap_fail_buffered_read(bio);
		return;
	}

	__iomap_read_end_io(bio, 0);
}

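/*
 * Complete a buffered read ioend.  Returns the number of folios that were
 * completed.
 */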
u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
{
	return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
}

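/*
 * The bio-based submit_read implementation: attach an integrity payload
 * if the mapping requires one, then send the bio to the block layer.
 */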
static void iomap_bio_submit_read(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx)
{
	struct bio *bio = ctx->read_ctx;

	if (iter->iomap.flags & IOMAP_F_INTEGRITY)
		fs_bio_integrity_alloc(bio);
	submit_bio(bio);
}

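/* Use the caller-supplied bio_set if there is one, else the generic one. */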
static struct bio_set *iomap_read_bio_set(struct iomap_read_folio_ctx *ctx)
{
	if (ctx->ops && ctx->ops->bio_set)
		return ctx->ops->bio_set;
	return &fs_bio_set;
}

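/*
 * Allocate a new read bio for the folio range at iter->pos.  Any
 * previously built bio is submitted first, then the new bio is sized for
 * the remaining mapping length and seeded with the current range.
 */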
static void iomap_read_alloc_bio(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	const struct iomap *iomap = &iter->iomap;
	unsigned int nr_vecs = DIV_ROUND_UP(iomap_length(iter), PAGE_SIZE);
	struct bio_set *bio_set = iomap_read_bio_set(ctx);
	struct folio *folio = ctx->cur_folio;
	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
	gfp_t orig_gfp = gfp;
	struct bio *bio;

	/* Submit the existing range if there was one. */
	if (ctx->read_ctx)
		ctx->ops->submit_read(iter, ctx);

	/* Same as readahead_gfp_mask: */
	if (ctx->rac)
		gfp |= __GFP_NORETRY | __GFP_NOWARN;

	/*
	 * If the bio_alloc fails, try it again for a single page to avoid
	 * having to deal with partial page reads.  This emulates what
	 * do_mpage_read_folio does.
	 */
	bio = bio_alloc_bioset(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
			gfp, bio_set);
	if (!bio)
		bio = bio_alloc_bioset(iomap->bdev, 1, REQ_OP_READ, orig_gfp,
				bio_set);
	if (ctx->rac)
		bio->bi_opf |= REQ_RAHEAD;
	bio->bi_iter.bi_sector = iomap_sector(iomap, iter->pos);
	bio->bi_end_io = iomap_read_end_io;
	bio_add_folio_nofail(bio, folio, plen,
			offset_in_folio(folio, iter->pos));
	ctx->read_ctx = bio;
	ctx->read_ctx_file_offset = iter->pos;
}

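/*
 * Add the folio range at iter->pos to the read bio under construction.  A
 * new bio is started when there is no pending bio, the range is not
 * contiguous with the pending bio, the bio would exceed the mapping's
 * size limit, or the folio cannot be added to it.
 */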
int iomap_bio_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	struct bio *bio = ctx->read_ctx;

	if (!bio ||
	    bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
	    bio->bi_iter.bi_size > iomap_max_bio_size(&iter->iomap) - plen ||
	    !bio_add_folio(bio, folio, plen, offset_in_folio(folio, iter->pos)))
		iomap_read_alloc_bio(iter, ctx, plen);
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_bio_read_folio_range);

const struct iomap_read_ops iomap_bio_read_ops = {
	.read_folio_range = iomap_bio_read_folio_range,
	.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
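
/*
 * Sketch of a caller, assuming the read context layout used in this file:
 * a filesystem that needs no custom bio_set or submission hook can point
 * its read context at iomap_bio_read_ops, e.g.:
 *
 *	struct iomap_read_folio_ctx ctx = {
 *		.ops		= &iomap_bio_read_ops,
 *		.cur_folio	= folio,
 *	};
 */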
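
/*
 * Read a folio range synchronously using an on-stack bio, verifying and
 * freeing any integrity payload before returning.  Returns 0 or a
 * negative errno.
 */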
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	sector_t sector = iomap_sector(srcmap, pos);
	struct bio_vec bvec;
	struct bio bio;
	int error;

	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
	if (srcmap->flags & IOMAP_F_INTEGRITY)
		fs_bio_integrity_alloc(&bio);
	error = submit_bio_wait(&bio);
	if (srcmap->flags & IOMAP_F_INTEGRITY) {
		if (!error)
			error = fs_bio_integrity_verify(&bio, sector, len);
		fs_bio_integrity_free(&bio);
	}
	return error;
}