/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_READ_H
#define _BCACHEFS_IO_READ_H

#include "bkey_buf.h"
#include "btree_iter.h"
#include "reflink.h"

struct bch_read_bio {
	struct bch_fs		*c;
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
	struct bch_read_bio	*parent;
	bio_end_io_t		*end_io;
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	unsigned		offset_into_extent;

	u16			flags;
	union {
	struct {
	u16			data_update:1,
				promote:1,
				bounce:1,
				split:1,
				have_ioref:1,
				narrow_crcs:1,
				saw_error:1,
				context:2;
	};
	u16			_state;
	};
	s16			ret;

	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct bch_io_opts	opts;

	struct work_struct	work;

	struct bio		bio;
};

#define to_rbio(_bio)		container_of((_bio), struct bch_read_bio, bio)
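
/*
 * Example (sketch, hypothetical handler name): a bio completion callback
 * receives the embedded struct bio and recovers the containing rbio via
 * container_of():
 *
 *	static void my_read_endio(struct bio *bio)
 *	{
 *		struct bch_read_bio *rbio = to_rbio(bio);
 *		...
 *	}
 */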

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;
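
/*
 * If @extent is a reflink pointer, look up the indirect extent it points to
 * in the reflink btree and replace @extent with it, switching @data_btree to
 * BTREE_ID_reflink and adjusting @offset_into_extent to be relative to the
 * indirect extent. No-op for ordinary (non-reflink_p) keys.
 */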
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    s64 *offset_into_extent,
					    struct bkey_buf *extent)
{
	if (extent->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;
	struct btree_iter iter;
	struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter,
						offset_into_extent,
						bkey_i_to_s_c_reflink_p(extent->k),
						true, 0);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (bkey_deleted(k.k)) {
		bch2_trans_iter_exit(trans, &iter);
		return -BCH_ERR_missing_indirect_extent;
	}

	bch2_bkey_buf_reassemble(extent, trans->c, k);
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}
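
/*
 * Example (sketch, hypothetical caller): resolving a possibly-indirect extent
 * after an extents btree lookup, before issuing the read. @sk is assumed to
 * be a struct bkey_buf holding the extent key, and @offset_into_extent the
 * offset of the read within that extent:
 *
 *	enum btree_id data_btree = BTREE_ID_extents;
 *
 *	ret = bch2_read_indirect_extent(trans, &data_btree,
 *					&offset_into_extent, &sk);
 *	if (ret)
 *		goto err;
 *
 * On return, sk holds the indirect extent (if there was one) and
 * offset_into_extent is relative to it.
 */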

#define BCH_READ_FLAGS()		\
	x(retry_if_stale)		\
	x(may_promote)			\
	x(user_mapped)			\
	x(last_fragment)		\
	x(must_bounce)			\
	x(must_clone)			\
	x(in_retry)

enum __bch_read_flags {
#define x(n)	__BCH_READ_##n,
	BCH_READ_FLAGS()
#undef x
};

enum bch_read_flags {
#define x(n)	BCH_READ_##n = BIT(__BCH_READ_##n),
	BCH_READ_FLAGS()
#undef x
};
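
/*
 * For reference, the x-macro above expands to sequential bit indices and the
 * corresponding bit masks, i.e.:
 *
 *	__BCH_READ_retry_if_stale = 0, __BCH_READ_may_promote = 1, ...
 *	BCH_READ_retry_if_stale = BIT(0), BCH_READ_may_promote = BIT(1), ...
 */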

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned, int);

static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	int ret = __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
				     data_btree, k, offset_into_extent, NULL, flags, -1);
	/* __bch2_read_extent only returns errors if BCH_READ_in_retry is set */
	WARN(ret, "unhandled error from __bch2_read_extent()");
}

int __bch2_read(struct btree_trans *, struct bch_read_bio *, struct bvec_iter,
		subvol_inum, struct bch_io_failures *, unsigned flags);
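
/*
 * Top-level entry point for a logical read: runs __bch2_read() inside a btree
 * transaction, with the default behaviour for user reads - retrying stale
 * pointers and allowing the read to be promoted to a faster device.
 */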
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	BUG_ON(rbio->_state);

	rbio->subvol = inum.subvol;

	bch2_trans_run(c,
		__bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL,
			    BCH_READ_retry_if_stale|
			    BCH_READ_may_promote|
			    BCH_READ_user_mapped));
}
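
/*
 * Initialize a bch_read_bio embedded in @bio as a fragment (split or bounce
 * child) of @orig: it completes into its parent rather than through its own
 * bi_end_io, so @split is set and @parent points at @orig.
 */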
static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio,
						      struct bch_read_bio *orig)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->c			= orig->c;
	rbio->_state		= 0;
	rbio->flags		= 0;
	rbio->ret		= 0;
	rbio->split		= true;
	rbio->parent		= orig;
	rbio->opts		= orig->opts;
	return rbio;
}
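
/*
 * Initialize a top-level bch_read_bio: unlike a fragment, it owns its own
 * completion (@end_io) and records @start_time for IO time stats.
 */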
static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_fs *c,
					     struct bch_io_opts opts,
					     bio_end_io_t end_io)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->start_time	= local_clock();
	rbio->c			= c;
	rbio->_state		= 0;
	rbio->flags		= 0;
	rbio->ret		= 0;
	rbio->opts		= opts;
	rbio->bio.bi_end_io	= end_io;
	return rbio;
}
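
/*
 * Example (sketch, hypothetical names): issuing a complete logical read.
 * my_read_endio is the caller's completion handler, as in the to_rbio()
 * example above:
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, c, io_opts, my_read_endio);
 *
 *	bch2_read(c, rbio, inum);
 *
 * bch2_read() takes ownership of the bio; completion is signalled through
 * the bi_end_io set by rbio_init().
 */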

void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_READ_H */