// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "internal.h"
#include "trace.h"

#include "../internal.h"

/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) have the uptodate status.
	 * Bits [b_p_f...(2*b_p_f)) have the dirty status.
	 */
	unsigned long		state[];
};
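
/*
 * Worked example (hypothetical geometry): with a 16K folio on a filesystem
 * using 4K blocks, i_blocks_per_folio() is 4, so ifs_alloc() sizes state[]
 * for 2 * 4 = 8 bits.  Bits 0..3 then carry the uptodate state of blocks
 * 0..3 and bits 4..7 carry their dirty state, which is why the dirty
 * helpers below offset every block number by blocks_per_folio.
 */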

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio));
	unsigned nblks = 1;

	while (!ifs_block_is_dirty(folio, ifs, start_blk))
		if (++start_blk == end_blk)
			return 0;

	while (start_blk + nblks < end_blk) {
		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
			break;
		nblks++;
	}

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}
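
/*
 * Walkthrough (hypothetical values): assume 4K blocks and a dirty bitmap of
 * 0b0110 for blocks 0..3.  Starting at block 0, the first loop above skips
 * the clean block 0, the second loop accumulates the dirty blocks 1 and 2
 * and stops at the clean block 3, so *range_start advances by 4K and 8K
 * worth of dirty data is returned.
 */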

static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}
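
/*
 * Sizing sketch (hypothetical geometry): a 64K folio over 4K blocks needs
 * 2 * 16 = 32 state bits, i.e. BITS_TO_LONGS(32) == 1 extra long on 64-bit.
 * Note the gfp choice above: IOMAP_NOWAIT callers must be able to tolerate
 * a failed allocation, everyone else relies on __GFP_NOFAIL.
 */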

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		while (++i <= last) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
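
/*
 * Trimming example (hypothetical state): reading 16K at pos 0 into a 16K
 * folio with 4K blocks where blocks 0 and 3 are already uptodate.  The
 * leading scan advances *pos and poff past block 0, the trailing scan trims
 * block 3, and the caller is left with an 8K read covering blocks 1 and 2.
 */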

static void iomap_finish_folio_read(struct folio *folio, size_t off,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (finished)
		folio_end_read(folio, uptodate);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}
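
/*
 * All three cases above describe blocks with no valid data to read from
 * disk: any srcmap type other than IOMAP_MAPPED (holes, delalloc, unwritten
 * extents), blocks freshly allocated by this operation (IOMAP_F_NEW), and
 * blocks entirely beyond the on-disk EOF.  The read path below zeroes such
 * ranges in the page cache instead of issuing I/O for them.
 */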

static int iomap_readpage_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	size_t poff, plen;
	sector_t sector;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_read_inline_data(iter, folio);
		if (ret)
			return ret;
		return iomap_iter_advance(iter, &length);
	}

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs) {
		spin_lock_irq(&ifs->state_lock);
		ifs->read_bytes_pending += plen;
		spin_unlock_irq(&ifs->state_lock);
	}

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	length = pos - iter->pos + plen;
	return iomap_iter_advance(iter, &length);
}

static int iomap_read_folio_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	int ret;

	while (iomap_length(iter)) {
		ret = iomap_readpage_iter(iter, ctx);
		if (ret)
			return ret;
	}

	return 0;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_read_folio_iter(&iter, &ctx);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static int iomap_readahead_iter(struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	int ret;

	while (iomap_length(iter)) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.status = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	if (iter->flags & IOMAP_DONTCACHE)
		fgp |= FGP_DONTCACHE;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);
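
/*
 * Hypothetical sketch of how a filesystem wires the helpers above into its
 * address_space_operations (names are illustrative, not from this file):
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &example_iomap_ops);
 *	}
 *
 *	static const struct address_space_operations example_aops = {
 *		.read_folio		= example_read_folio,
 *		.readahead		= example_readahead,
 *		.dirty_folio		= iomap_dirty_folio,
 *		.release_folio		= iomap_release_folio,
 *		.invalidate_folio	= iomap_invalidate_folio,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	};
 *
 * Only ->read_folio and ->readahead need wrappers to supply the iomap_ops;
 * the folio state helpers can be used directly.
 */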

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale.  The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);

	return status;
}

static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}

static void iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
}

/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise return false.
 */
static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	if (srcmap->type == IOMAP_INLINE) {
		iomap_write_end_inline(iter, folio, pos, copied);
		return true;
	}

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
					len, copied, folio, NULL);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}

static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	ssize_t total_written = 0;
	int status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		u64 written;		/* Bytes have been written */
		loff_t pos = iter->pos;

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > iomap_length(iter))
			bytes = iomap_length(iter);

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in.  This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache.  It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed.  Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, pos, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			total_written += written;
			iomap_iter_advance(iter, &written);
		}
	} while (iov_iter_count(i) && iomap_length(iter));

	return total_written ? 0 : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;
	if (iocb->ki_flags & IOCB_DONTCACHE)
		iter.flags |= IOMAP_DONTCACHE;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
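
/*
 * Rough sketch of a ->write_iter implementation built on the helper above
 * (hypothetical names; the locking and checks vary by filesystem):
 *
 *	static ssize_t example_buffered_write(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops, NULL);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */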

static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty.  In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i))
			punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits, iomap);
	}
}

static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
				folio_pos(folio) + folio_size(folio));
}

/*
 * Scan the data range passed to us for dirty page cache folios.  If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were instantiated by read faults, in
 * which case they contain zeroes and we can remove the delalloc backing range;
 * any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we need to keep the delalloc extents only over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio *folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_pos(folio) + folio_size(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
}

/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it.  It must skip over all other types of extents in the range and leave
 * them completely unchanged.  It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes.  This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF.  e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date.  A write page fault can then mark it dirty.  If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out.  Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty.  This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF.  Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data().  i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end).  Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);

	/*
	 * The caller must hold invalidate_lock to avoid races with page faults
	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
	 * we walk the cache and perform delalloc extent removal.  Failing to do
	 * this can leave dirty pages with no space reservation in the cache.
	 */
	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);

	while (start_byte < scan_end_byte) {
		loff_t data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 *
		 * Note that mapping_seek_hole_data is only supposed to return
		 * either an offset or -ENXIO, so WARN on any other error as
		 * that would be an API change without updating the callers.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (WARN_ON_ONCE(start_byte < 0))
			return;
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (WARN_ON_ONCE(data_end < 0))
			return;

		/*
		 * If we race with post-direct I/O invalidation of the page
		 * cache, there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
				data_end, iomap, punch);

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		punch(inode, punch_start_byte, end_byte - punch_start_byte,
				iomap);
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
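
/*
 * Shape of a punch callback (hypothetical sketch; a real filesystem must
 * remove only the delalloc reservations backing [offset, offset + length)
 * and leave every other extent type untouched, as required above):
 *
 *	static void example_punch_delalloc(struct inode *inode, loff_t offset,
 *			loff_t length, struct iomap *iomap)
 *	{
 *		example_bmap_punch_delalloc_range(inode, offset,
 *				offset + length);
 *	}
 */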

static int iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	u64 bytes = iomap_length(iter);
	int status;

	if (!iomap_want_unshare_iter(iter))
		return iomap_iter_advance(iter, &bytes);

	do {
		struct folio *folio;
		size_t offset;
		loff_t pos = iter->pos;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
		__iomap_put_folio(iter, pos, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		cond_resched();

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);

		status = iomap_iter_advance(iter, &bytes);
		if (status)
			break;
	} while (bytes > 0);

	return status;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
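
/*
 * Usage note (not code from this file): filesystems with reflink support
 * typically call iomap_file_unshare() from their
 * fallocate(FALLOC_FL_UNSHARE_RANGE) path to break extent sharing by
 * copying the shared blocks up through the page cache.
 */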

/*
 * Flush the remaining range of the iter and mark the current mapping stale.
 * This is used when zero range sees an unwritten mapping that may have had
 * dirty pagecache over it.
 */
static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
{
	struct address_space *mapping = i->inode->i_mapping;
	loff_t end = i->pos + i->len - 1;

	i->iomap.flags |= IOMAP_F_STALE;
	return filemap_write_and_wait_range(mapping, i->pos, end);
}

static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	u64 bytes = iomap_length(iter);
	int status;

	do {
		struct folio *folio;
		size_t offset;
		loff_t pos = iter->pos;
		bool ret;

		bytes = min_t(u64, SIZE_MAX, bytes);
		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		/* warn about zeroing folios beyond eof that won't write back */
		WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
		__iomap_put_folio(iter, pos, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		status = iomap_iter_advance(iter, &bytes);
		if (status)
			break;
	} while (bytes > 0);

	if (did_zero)
		*did_zero = true;
	return status;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
		.private	= private,
	};
	struct address_space *mapping = inode->i_mapping;
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);
	loff_t plen = min_t(loff_t, len, blocksize - off);
	int ret;
	bool range_dirty;

	/*
	 * Zero range can skip mappings that are zero on disk so long as
	 * pagecache is clean.  If pagecache was dirty prior to zero range, the
	 * mapping converts on writeback completion and so must be zeroed.
	 *
	 * The simplest way to deal with this across a range is to flush
	 * pagecache and process the updated mappings.  To avoid excessive
	 * flushing on partial eof zeroing, special case it to zero the
	 * unaligned start portion if already dirty in pagecache.
	 */
	if (off &&
	    filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
		iter.len = plen;
		while ((ret = iomap_iter(&iter, ops)) > 0)
			iter.status = iomap_zero_iter(&iter, did_zero);

		iter.len = len - (iter.pos - pos);
		if (ret || !iter.len)
			return ret;
	}

	/*
	 * To avoid an unconditional flush, check pagecache state and only flush
	 * if dirty and the fs returns a mapping that might convert on
	 * writeback.
	 */
	range_dirty = filemap_range_needs_writeback(inode->i_mapping,
					iter.pos, iter.pos + iter.len - 1);
	while ((ret = iomap_iter(&iter, ops)) > 0) {
		const struct iomap *srcmap = iomap_iter_srcmap(&iter);

		if (srcmap->type == IOMAP_HOLE ||
		    srcmap->type == IOMAP_UNWRITTEN) {
			s64 status;

			if (range_dirty) {
				range_dirty = false;
				status = iomap_zero_iter_flush_and_stale(&iter);
			} else {
				status = iomap_iter_advance_full(&iter);
			}
			iter.status = status;
			continue;
		}

		iter.status = iomap_zero_iter(&iter, did_zero);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops, void *private)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary?  Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops,
			private);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
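
/*
 * Typical use (hypothetical sketch): a filesystem shrinking an inode zeroes
 * the tail of the new last block before updating i_size, e.g.
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&example_iomap_ops, NULL);
 *
 * so that stale data past the new EOF never becomes visible if the block is
 * later exposed again by an extending operation.
 */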

static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(folio, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return iomap_iter_advance(iter, &length);
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
		void *private)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
		.private	= private,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
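
/*
 * Hypothetical sketch of the vm_operations_struct side (a real filesystem
 * usually also brackets the call with sb_start_pagefault()/
 * sb_end_pagefault() and takes whatever locks its iomap_begin callback
 * expects):
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &example_iomap_ops, NULL);
 *	}
 */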

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);

	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_bio;
	struct folio_iter fi;
	u32 folio_count = 0;

	if (ioend->io_error) {
		mapping_set_error(inode->i_mapping, ioend->io_error);
		if (!bio_flagged(bio, BIO_QUIET)) {
			pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
				inode->i_sb->s_id, inode->i_ino,
				ioend->io_offset, ioend->io_sector);
		}
	}

	/* walk all folios in bio, ending page IO on them */
	bio_for_each_folio_all(fi, bio) {
		iomap_finish_folio_write(inode, fi.folio, fi.length);
		folio_count++;
	}

	bio_put(bio);	/* frees the ioend */
	return folio_count;
}

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);

	ioend->io_error = blk_status_to_errno(bio->bi_status);
	iomap_finish_ioend_buffered(ioend);
}

/*
 * Submit an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback.
 * We cannot cancel an ioend directly in that case, so call the bio end I/O
 * handler with the error status here to run the normal I/O completion handler
 * to clear the writeback bit and let the file system process the errors.
 */
static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
{
	if (!wpc->ioend)
		return error;

	/*
	 * Let the file systems prepare the I/O submission and hook in an I/O
	 * completion handler.  This also needs to happen in case of a
	 * submission failure so that the file system end I/O handler gets
	 * called to clean up.
	 */
	if (wpc->ops->submit_ioend) {
		error = wpc->ops->submit_ioend(wpc, error);
	} else {
		if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
			error = -EIO;
		if (!error)
			submit_bio(&wpc->ioend->io_bio);
	}

	if (error) {
		wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
		bio_endio(&wpc->ioend->io_bio);
	}

	wpc->ioend = NULL;
	return error;
}

static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode, loff_t pos,
		u16 ioend_flags)
{
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
	bio->bi_end_io = iomap_writepage_end_bio;
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);
	wpc->nr_folios = 0;
	return iomap_init_ioend(inode, bio, pos, ioend_flags);
}

static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
		u16 ioend_flags)
{
	if (ioend_flags & IOMAP_IOEND_BOUNDARY)
		return false;
	if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
	    (wpc->ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
		return false;
	if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
	    iomap_sector(&wpc->iomap, pos) !=
	    bio_end_sector(&wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency.
	 * This also prevents long tight loops ending page writeback on all
	 * the folios in the ioend.
	 */
	if (wpc->nr_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the
 * block layer instantly.  Batching optimisations are provided by higher level
 * block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio,
		struct inode *inode, loff_t pos, loff_t end_pos,
		unsigned len)
{
	struct iomap_folio_state *ifs = folio->private;
	size_t poff = offset_in_folio(folio, pos);
	unsigned int ioend_flags = 0;
	int error;

	if (wpc->iomap.type == IOMAP_UNWRITTEN)
		ioend_flags |= IOMAP_IOEND_UNWRITTEN;
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		ioend_flags |= IOMAP_IOEND_SHARED;
	if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
		ioend_flags |= IOMAP_IOEND_BOUNDARY;

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
new_ioend:
		error = iomap_submit_ioend(wpc, 0);
		if (error)
			return error;
		wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos,
				ioend_flags);
	}

	if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
		goto new_ioend;

	if (ifs)
		atomic_add(len, &ifs->write_bytes_pending);

	/*
	 * Clamp io_offset and io_size to the incore EOF so that ondisk
	 * file size updates in the ioend completion are byte-accurate.
	 * This avoids recovering files with zeroed tail regions when
	 * writeback races with appending writes:
	 *
	 * Thread 1:                  Thread 2:
	 * ------------               -----------
	 * write [A, A+B]
	 * update inode size to A+B
	 * submit I/O [A, A+BS]
	 *                            write [A+B, A+B+C]
	 *                            update inode size to A+B+C
	 * <I/O completes, updates disk size to min(A+B+C, A+BS)>
	 * <power failure>
	 *
	 * After reboot:
	 *   1) with A+B+C < A+BS, the file has zero padding in range
	 *      [A+B, A+B+C]
	 *
	 *   |<     Block Size (BS)   >|
	 *   |DDDDDDDDDDDD0000000000000|
	 *   ^           ^        ^
	 *   A          A+B     A+B+C
	 *                       (EOF)
	 *
	 *   2) with A+B+C > A+BS, the file has zero padding in range
	 *      [A+B, A+BS]
	 *
	 *   |<     Block Size (BS)   >|<     Block Size (BS)    >|
	 *   |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
	 *   ^           ^             ^           ^
	 *   A          A+B           A+BS       A+B+C
	 *                            (EOF)
	 *
	 *   D = Valid Data
	 *   0 = Zero Padding
	 *
	 * Note that this defeats the ability to chain the ioends of
	 * appending writes.
	 */
	wpc->ioend->io_size += len;
	if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos)
		wpc->ioend->io_size = end_pos - wpc->ioend->io_offset;

	wbc_account_cgroup_owner(wbc, folio, len);
	return 0;
}
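
/*
 * Worked instance of the EOF clamp above (illustrative numbers, not taken
 * from the code): with a 4k block size, io_offset == 0, len == 4096 and an
 * in-core i_size (and hence end_pos) of 2100, io_size is first bumped to
 * 4096 and then clamped back to 2100, so the completion handler only moves
 * the on-disk size to 2100 instead of exposing the zeroed tail of the block.
 */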

static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio,
		struct inode *inode, u64 pos, u64 end_pos,
		unsigned dirty_len, unsigned *count)
{
	int error;

	do {
		unsigned map_len;

		error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
		if (error)
			break;
		trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);

		map_len = min_t(u64, dirty_len,
			wpc->iomap.offset + wpc->iomap.length - pos);
		WARN_ON_ONCE(!folio->private && map_len < dirty_len);

		switch (wpc->iomap.type) {
		case IOMAP_INLINE:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		case IOMAP_HOLE:
			break;
		default:
			error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
					end_pos, map_len);
			if (!error)
				(*count)++;
			break;
		}
		dirty_len -= map_len;
		pos += map_len;
	} while (dirty_len && !error);

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 *
	 * Just let the file system know what portion of the folio failed to
	 * map.
	 */
	if (error && wpc->ops->discard_folio)
		wpc->ops->discard_folio(folio, pos);
	return error;
}
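
/*
 * Illustrative sketch (not part of this file): ->map_blocks fills in
 * wpc->iomap with a mapping that covers at least the first block of the
 * dirty range; iomap_writepage_map_blocks() above trims the result to
 * map_len and loops for the remainder.  A hypothetical filesystem that maps
 * file offsets 1:1 onto its block device could get away with:
 *
 *	static int example_fs_map_blocks(struct iomap_writepage_ctx *wpc,
 *			struct inode *inode, loff_t offset, unsigned int len)
 *	{
 *		wpc->iomap.type = IOMAP_MAPPED;
 *		wpc->iomap.flags = 0;
 *		wpc->iomap.bdev = inode->i_sb->s_bdev;
 *		wpc->iomap.offset = 0;
 *		wpc->iomap.length = round_up(i_size_read(inode),
 *				i_blocksize(inode));
 *		wpc->iomap.addr = 0;
 *		return 0;
 *	}
 *
 * Real implementations look up (and possibly allocate) extents here and
 * must revalidate any mapping cached in wpc across calls.
 */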

/*
 * Check interaction of the folio with the file end.
 *
 * If the folio is entirely beyond i_size, return false.  If it straddles
 * i_size, adjust end_pos and zero all data beyond i_size.
 */
static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
		u64 *end_pos)
{
	u64 isize = i_size_read(inode);

	if (*end_pos > isize) {
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * If the folio is entirely outside of i_size, skip it.
		 *
		 * This can happen due to a truncate operation that is in
		 * progress and in that case truncate will finish it off once
		 * we've dropped the folio lock.
		 *
		 * Note that the pgoff_t used for end_index is an unsigned
		 * long.  If the given offset is greater than 16TB on a 32-bit
		 * system, then if we checked if the folio is fully outside
		 * i_size with "if (folio->index >= end_index + 1)",
		 * "end_index + 1" would overflow and evaluate to 0.  Hence
		 * this folio would be redirtied and written out repeatedly,
		 * which would result in an infinite loop; the user program
		 * performing this operation would hang.  Instead, we can
		 * detect this situation by checking if the folio is totally
		 * beyond i_size or if its offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			return false;

		/*
		 * The folio straddles i_size.
		 *
		 * It must be zeroed out on each and every writepage
		 * invocation because it may be mmapped:
		 *
		 *    A file is mapped in multiples of the page size.  For a
		 *    file that is not a multiple of the page size, the
		 *    remaining memory is zeroed when mapped, and writes to
		 *    that region are not written out to the file.
		 *
		 * Also adjust the end_pos to the end of file and skip
		 * writeback for all blocks entirely beyond i_size.
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		*end_pos = isize;
	}

	return true;
}

static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	u64 pos = folio_pos(folio);
	u64 end_pos = pos + folio_size(folio);
	u64 end_aligned = 0;
	unsigned count = 0;
	int error = 0;
	u32 rlen;

	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));

	trace_iomap_writepage(inode, pos, folio_size(folio));

	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
		folio_unlock(folio);
		return 0;
	}
	WARN_ON_ONCE(end_pos <= pos);

	if (i_blocks_per_folio(inode, folio) > 1) {
		if (!ifs) {
			ifs = ifs_alloc(inode, folio, 0);
			iomap_set_range_dirty(folio, 0, end_pos - pos);
		}

		/*
		 * Keep the I/O completion handler from clearing the writeback
		 * bit until we have submitted all blocks by adding a bias to
		 * ifs->write_bytes_pending, which is dropped after submitting
		 * all blocks.
		 */
		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
		atomic_inc(&ifs->write_bytes_pending);
	}

	/*
	 * Set the writeback bit ASAP, as the I/O completion for the single
	 * block per folio case can happen as soon as we're submitting the
	 * bio.
	 */
	folio_start_writeback(folio);

	/*
	 * Walk through the folio to find dirty areas to write back.
	 */
	end_aligned = round_up(end_pos, i_blocksize(inode));
	while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) {
		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
				pos, end_pos, rlen, &count);
		if (error)
			break;
		pos += rlen;
	}

	if (count)
		wpc->nr_folios++;

	/*
	 * We can have dirty bits set past end of file in page_mkwrite path
	 * while mapping the last partial folio.  Hence it's better to clear
	 * all the dirty bits in the folio here.
	 */
	iomap_clear_range_dirty(folio, 0, folio_size(folio));

	/*
	 * Usually the writeback bit is cleared by the I/O completion handler.
	 * But we may end up either not actually writing any blocks, or (when
	 * there are multiple blocks in a folio) all I/O might have finished
	 * already at this point.  In that case we need to clear the writeback
	 * bit ourselves right after unlocking the page.
	 */
	folio_unlock(folio);
	if (ifs) {
		if (atomic_dec_and_test(&ifs->write_bytes_pending))
			folio_end_writeback(folio);
	} else {
		if (!count)
			folio_end_writeback(folio);
	}
	mapping_set_error(inode->i_mapping, error);
	return error;
}
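
/*
 * The write_bytes_pending bias used above is the usual "hold an extra
 * reference while building" pattern.  In miniature (illustrative, not code
 * from this file):
 *
 *	atomic_set(&pending, 1);		// submission bias
 *	for_each_piece(piece) {
 *		atomic_add(piece_len, &pending);
 *		submit(piece);			// completion subtracts piece_len
 *	}
 *	if (atomic_dec_and_test(&pending))	// drop the bias
 *		folio_end_writeback(folio);	// no piece completed last
 *
 * Whoever brings the count to zero, the submitter or the last completing
 * piece, ends writeback exactly once.
 */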

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	struct folio *folio = NULL;
	int error;

	/*
	 * Writeback from reclaim context should never happen except in the
	 * case of a VM regression so warn about it and refuse to write the
	 * data.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
			PF_MEMALLOC))
		return -EIO;

	wpc->ops = ops;
	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = iomap_writepage_map(wpc, wbc, folio);
	return iomap_submit_ioend(wpc, error);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
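
/*
 * Illustrative sketch (not part of this file): a filesystem wires
 * iomap_writepages() into its address_space_operations like this, keeping
 * the writepage context on the stack for the duration of the writeback
 * pass.  The example_fs_* names are hypothetical; example_fs_map_blocks is
 * sketched above after iomap_writepage_map_blocks():
 *
 *	static const struct iomap_writeback_ops example_fs_writeback_ops = {
 *		.map_blocks	= example_fs_map_blocks,
 *	};
 *
 *	static int example_fs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&example_fs_writeback_ops);
 *	}
 *
 * with .writepages = example_fs_writepages and the remaining aops pointed
 * at the matching iomap helpers.
 */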