xref: /linux/include/linux/bvec.h (revision 99dfe2d4da67d863ff8f185d1e8033cce28e4c49)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * bvec iterator
4  *
5  * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
6  */
7 #ifndef __LINUX_BVEC_H
8 #define __LINUX_BVEC_H
9 
10 #include <linux/highmem.h>
11 #include <linux/bug.h>
12 #include <linux/errno.h>
13 #include <linux/limits.h>
14 #include <linux/minmax.h>
15 #include <linux/types.h>
16 
17 struct page;
18 
19 /**
20  * struct bio_vec - a contiguous range of physical memory addresses
21  * @bv_page:   First page associated with the address range.
22  * @bv_len:    Number of bytes in the address range.
23  * @bv_offset: Start of the address range relative to the start of @bv_page.
24  *
25  * All pages within a bio_vec starting from @bv_page are contiguous and
26  * can simply be iterated (see bvec_advance()).
27  */
28 struct bio_vec {
29 	struct page	*bv_page;
30 	unsigned int	bv_len;
31 	unsigned int	bv_offset;
32 };
33 
34 /**
35  * bvec_set_page - initialize a bvec based off a struct page
36  * @bv:		bvec to initialize
37  * @page:	page the bvec should point to
38  * @len:	length of the bvec
39  * @offset:	offset into the page
40  */
bvec_set_page(struct bio_vec * bv,struct page * page,unsigned int len,unsigned int offset)41 static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
42 		unsigned int len, unsigned int offset)
43 {
44 	bv->bv_page = page;
45 	bv->bv_len = len;
46 	bv->bv_offset = offset;
47 }
48 
49 /**
50  * bvec_set_folio - initialize a bvec based off a struct folio
51  * @bv:		bvec to initialize
52  * @folio:	folio the bvec should point to
53  * @len:	length of the bvec
54  * @offset:	offset into the folio
55  */
bvec_set_folio(struct bio_vec * bv,struct folio * folio,size_t len,size_t offset)56 static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
57 		size_t len, size_t offset)
58 {
59 	unsigned long nr = offset / PAGE_SIZE;
60 
61 	WARN_ON_ONCE(len > UINT_MAX);
62 	bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
63 }
64 
65 /**
66  * bvec_set_virt - initialize a bvec based on a virtual address
67  * @bv:		bvec to initialize
68  * @vaddr:	virtual address to set the bvec to
69  * @len:	length of the bvec
70  */
bvec_set_virt(struct bio_vec * bv,void * vaddr,unsigned int len)71 static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
72 		unsigned int len)
73 {
74 	bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
75 }
76 
/*
 * struct bvec_iter - iterator state over a bio_vec array.
 *
 * Layout is packed and 4-byte aligned so it can be embedded cheaply
 * (e.g. inside struct bio); do not reorder or resize the members.
 */
struct bvec_iter {
	/*
	 * Current device address in 512 byte sectors. Only updated by the bio
	 * iter wrappers and not the bvec iterator helpers themselves.
	 */
	sector_t		bi_sector;

	/*
	 * Remaining size in bytes.
	 */
	unsigned int		bi_size;

	/*
	 * Current index into the bvec array. This indexes into `bi_io_vec` when
	 * iterating a bvec array that is part of a `bio`.
	 */
	unsigned int		bi_idx;

	/*
	 * Current offset in the bvec entry pointed to by `bi_idx`.
	 */
	unsigned int		bi_bvec_done;
} __packed __aligned(4);
100 
/*
 * struct bvec_iter_all - state for page-at-a-time iteration of a bvec
 * array; initialized by bvec_init_iter_all() and stepped by bvec_advance().
 */
struct bvec_iter_all {
	struct bio_vec	bv;	/* synthesized single-page bvec for the current page */
	int		idx;	/* index of the current entry in the bvec array */
	unsigned	done;	/* bytes of the current entry already consumed */
};
106 
107 /*
108  * various member access, note that bio_data should of course not be used
109  * on highmem page vectors
110  */
111 #define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])
112 
113 /* multi-page (mp_bvec) helpers */
114 #define mp_bvec_iter_page(bvec, iter)				\
115 	(__bvec_iter_bvec((bvec), (iter))->bv_page)
116 
117 #define mp_bvec_iter_len(bvec, iter)				\
118 	min((iter).bi_size,					\
119 	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
120 
121 #define mp_bvec_iter_offset(bvec, iter)				\
122 	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
123 
124 #define mp_bvec_iter_page_idx(bvec, iter)			\
125 	(mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE)
126 
127 #define mp_bvec_iter_bvec(bvec, iter)				\
128 ((struct bio_vec) {						\
129 	.bv_page	= mp_bvec_iter_page((bvec), (iter)),	\
130 	.bv_len		= mp_bvec_iter_len((bvec), (iter)),	\
131 	.bv_offset	= mp_bvec_iter_offset((bvec), (iter)),	\
132 })
133 
134 /* For building single-page bvec in flight */
135  #define bvec_iter_offset(bvec, iter)				\
136 	(mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE)
137 
138 #define bvec_iter_len(bvec, iter)				\
139 	min_t(unsigned, mp_bvec_iter_len((bvec), (iter)),		\
140 	      PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
141 
142 #define bvec_iter_page(bvec, iter)				\
143 	(mp_bvec_iter_page((bvec), (iter)) +			\
144 	 mp_bvec_iter_page_idx((bvec), (iter)))
145 
146 #define bvec_iter_bvec(bvec, iter)				\
147 ((struct bio_vec) {						\
148 	.bv_page	= bvec_iter_page((bvec), (iter)),	\
149 	.bv_len		= bvec_iter_len((bvec), (iter)),	\
150 	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
151 })
152 
bvec_iter_advance(const struct bio_vec * bv,struct bvec_iter * iter,unsigned bytes)153 static inline bool bvec_iter_advance(const struct bio_vec *bv,
154 		struct bvec_iter *iter, unsigned bytes)
155 {
156 	unsigned int idx = iter->bi_idx;
157 
158 	if (WARN_ONCE(bytes > iter->bi_size,
159 		     "Attempted to advance past end of bvec iter\n")) {
160 		iter->bi_size = 0;
161 		return false;
162 	}
163 
164 	iter->bi_size -= bytes;
165 	bytes += iter->bi_bvec_done;
166 
167 	while (bytes && bytes >= bv[idx].bv_len) {
168 		bytes -= bv[idx].bv_len;
169 		idx++;
170 	}
171 
172 	iter->bi_idx = idx;
173 	iter->bi_bvec_done = bytes;
174 	return true;
175 }
176 
177 /*
178  * A simpler version of bvec_iter_advance(), @bytes should not span
179  * across multiple bvec entries, i.e. bytes <= bv[i->bi_idx].bv_len
180  */
bvec_iter_advance_single(const struct bio_vec * bv,struct bvec_iter * iter,unsigned int bytes)181 static inline void bvec_iter_advance_single(const struct bio_vec *bv,
182 				struct bvec_iter *iter, unsigned int bytes)
183 {
184 	unsigned int done = iter->bi_bvec_done + bytes;
185 
186 	if (done == bv[iter->bi_idx].bv_len) {
187 		done = 0;
188 		iter->bi_idx++;
189 	}
190 	iter->bi_bvec_done = done;
191 	iter->bi_size -= bytes;
192 }
193 
/* Iterate @iter over @bio_vec one single-page bvec (@bvl) at a time. */
#define for_each_bvec(bvl, bio_vec, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);	\
	     bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))

/* Iterate @iter over @bio_vec one (possibly multi-page) entry at a time. */
#define for_each_mp_bvec(bvl, bio_vec, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1);	\
	     bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))

/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter)				\
{									\
	.bi_sector	= 0,						\
	.bi_size	= UINT_MAX,					\
	.bi_idx		= 0,						\
	.bi_bvec_done	= 0,						\
}
214 
bvec_init_iter_all(struct bvec_iter_all * iter_all)215 static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
216 {
217 	iter_all->done = 0;
218 	iter_all->idx = 0;
219 
220 	return &iter_all->bv;
221 }
222 
bvec_advance(const struct bio_vec * bvec,struct bvec_iter_all * iter_all)223 static inline void bvec_advance(const struct bio_vec *bvec,
224 				struct bvec_iter_all *iter_all)
225 {
226 	struct bio_vec *bv = &iter_all->bv;
227 
228 	if (iter_all->done) {
229 		bv->bv_page++;
230 		bv->bv_offset = 0;
231 	} else {
232 		bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
233 		bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
234 	}
235 	bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
236 			   bvec->bv_len - iter_all->done);
237 	iter_all->done += bv->bv_len;
238 
239 	if (iter_all->done == bvec->bv_len) {
240 		iter_all->idx++;
241 		iter_all->done = 0;
242 	}
243 }
244 
245 /**
246  * bvec_kmap_local - map a bvec into the kernel virtual address space
247  * @bvec: bvec to map
248  *
249  * Must be called on single-page bvecs only.  Call kunmap_local on the returned
250  * address to unmap.
251  */
bvec_kmap_local(struct bio_vec * bvec)252 static inline void *bvec_kmap_local(struct bio_vec *bvec)
253 {
254 	return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
255 }
256 
257 /**
258  * memcpy_from_bvec - copy data from a bvec
259  * @bvec: bvec to copy from
260  *
261  * Must be called on single-page bvecs only.
262  */
memcpy_from_bvec(char * to,struct bio_vec * bvec)263 static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
264 {
265 	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
266 }
267 
268 /**
269  * memcpy_to_bvec - copy data to a bvec
270  * @bvec: bvec to copy to
271  *
272  * Must be called on single-page bvecs only.
273  */
memcpy_to_bvec(struct bio_vec * bvec,const char * from)274 static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
275 {
276 	memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
277 }
278 
279 /**
280  * memzero_bvec - zero all data in a bvec
281  * @bvec: bvec to zero
282  *
283  * Must be called on single-page bvecs only.
284  */
memzero_bvec(struct bio_vec * bvec)285 static inline void memzero_bvec(struct bio_vec *bvec)
286 {
287 	memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
288 }
289 
290 /**
291  * bvec_virt - return the virtual address for a bvec
292  * @bvec: bvec to return the virtual address for
293  *
294  * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
295  */
bvec_virt(struct bio_vec * bvec)296 static inline void *bvec_virt(struct bio_vec *bvec)
297 {
298 	WARN_ON_ONCE(PageHighMem(bvec->bv_page));
299 	return page_address(bvec->bv_page) + bvec->bv_offset;
300 }
301 
302 /**
303  * bvec_phys - return the physical address for a bvec
304  * @bvec: bvec to return the physical address for
305  */
bvec_phys(const struct bio_vec * bvec)306 static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
307 {
308 	return page_to_phys(bvec->bv_page) + bvec->bv_offset;
309 }
310 
311 #endif /* __LINUX_BVEC_H */
312