// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with @i_mutex unlocked.
 * On "normal" work-paths @i_mutex is locked in 'ubifs_writepage()', e.g. in
 * the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> read_folio"). In the case of readahead, the @I_SYNC
 * flag is not set either. However, UBIFS disables readahead.
 */
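
/*
 * Editorial sketch of the flag-to-budget mapping described above (an
 * illustration, not driver logic - the authoritative releases are done in
 * 'cancel_budget()' and 'do_writepage()' below):
 *
 *	if (folio->private)			// dirty, budget already held
 *		;				// keep it until write-out
 *	else if (folio_test_checked(folio))	// hole or beyond file size
 *		release_new_page_budget(c);
 *	else					// page exists on the media
 *		release_existing_page_budget(c);
 */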

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

static int read_block(struct inode *inode, struct folio *folio, size_t offset,
		      unsigned int block, struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (IS_ENCRYPTED(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset,
				     &out_len, le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
	return -EINVAL;
}
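
/*
 * Editorial worked example of the block arithmetic used in this file,
 * assuming UBIFS_BLOCK_SIZE == 4096 (UBIFS_BLOCK_SHIFT == 12): on a system
 * with 64KiB pages, UBIFS_BLOCKS_PER_PAGE is 16 and
 * UBIFS_BLOCKS_PER_PAGE_SHIFT is 4, so folio->index 3 maps to block
 * 3 << 4 == 48, and a 100000-byte file spans (100000 + 4095) >> 12 == 25
 * blocks. With 4KiB pages the shift is 0 and pages map to blocks one-to-one.
 */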

static int do_readpage(struct folio *folio)
{
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn = NULL;
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);
	size_t offset = 0;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, folio->index, i_size, folio->flags);
	ubifs_assert(c, !folio_test_checked(folio));
	ubifs_assert(c, !folio->private);

	block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		folio_set_checked(folio);
		folio_zero_range(folio, 0, folio_size(folio));
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto out;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, folio, offset, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					folio_zero_range(folio, offset + ilen, dlen - ilen);
			}
		}
		if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
			break;
		block += 1;
		offset += UBIFS_BLOCK_SIZE;
	}

	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			folio_set_checked(folio);
			dbg_gen("hole");
			err = 0;
		} else {
			ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
				  folio->index, inode->i_ino, err);
		}
	}

out:
	kfree(dn);
	if (!err)
		folio_mark_uptodate(folio);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget };

	ubifs_release_budget(c, &req);
}
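
/*
 * Editorial sketch (not driver logic) of how a budget request pairs with a
 * release. In this file the budget is taken in 'ubifs_write_begin()' and
 * returned either by the helpers above, once the page is written out or
 * invalidated, or immediately if the operation aborts:
 *
 *	struct ubifs_budget_req req = { .new_page = 1 };
 *
 *	err = ubifs_budget_space(c, &req);	// may force write-back
 *	if (err)
 *		return err;			// nothing was reserved
 *	if (operation_failed)			// hypothetical condition
 *		ubifs_release_budget(c, &req);	// give the space back
 */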

static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct folio **foliop)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, appending = !!(pos + len > inode->i_size);
	struct folio *folio;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * On the slow path we have to budget before locking the folio, because
	 * budgeting may force write-back, which would wait on locked folios and
	 * deadlock if we had the folio locked. At this point we do not know
	 * anything about the folio, so assume that this is a new folio which is
	 * written to a hole. This corresponds to the largest budget. Later the
	 * budget will be amended if this turns out not to be true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio)) {
		ubifs_release_budget(c, &req);
		return PTR_ERR(folio);
	}

	if (!folio_test_uptodate(folio)) {
		if (pos == folio_pos(folio) && len >= folio_size(folio))
			folio_set_checked(folio);
		else {
			err = do_readpage(folio);
			if (err) {
				folio_unlock(folio);
				folio_put(folio);
				ubifs_release_budget(c, &req);
				return err;
			}
		}
	}

	if (folio->private)
		/*
		 * The folio is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the folio dirty and set the private field;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the folio budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!folio_test_checked(folio))
		/*
		 * We are changing a folio which already exists on the media.
		 * This means that changing the folio does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*foliop = folio;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @folio: folio to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending.
 *
 * Returns: %0 in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct folio *folio,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (folio->private) {
		if (!appending)
			/*
			 * The folio is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (folio_test_checked(folio))
			/*
			 * The page corresponds to a hole and does not
			 * exist on the media. So changing it makes
			 * the amount of indexing information
			 * larger, and we have to budget for a new
			 * page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new
			 * indexing information, budget for page
			 * change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for it; if the
 *       page does not exist on the media (a hole), we have to budget for a new
 *       page; otherwise, we may budget for changing an existing page; the
 *       difference between these cases is that changing an existing page does
 *       not introduce anything new to the FS indexing information, so it does
 *       not grow, and a smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget was acquired earlier, when the page was marked dirty.
 *
 * The UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions which make it
 * impossible to take the above cases into account and to optimize budgeting.
 *
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
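
/*
 * A compressed sketch of the resulting control flow (editorial illustration;
 * the authoritative logic is in the functions themselves):
 *
 *	ubifs_write_begin()
 *	    lock the folio, read it in if a partial write needs old data
 *	    allocate_budget(..., .fast = 1)	// never forces write-back
 *	    if budgeting failed with -ENOSPC:
 *	        unlock the folio
 *	        write_begin_slow()		// budget first, may write back
 */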
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len,
			     struct folio **foliop, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = pos >> PAGE_SHIFT;
	int err, appending = !!(pos + len > inode->i_size);
	int skipped_read = 0;
	struct folio *folio;

	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return -EROFS;

	/* Try out the fast-path part first */
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!folio_test_uptodate(folio)) {
		/* The page has not been loaded from the flash yet */
		if (pos == folio_pos(folio) && len >= folio_size(folio)) {
			/*
			 * We are changing the whole page, so there is no need
			 * to load it. But we do not know whether this page
			 * exists on the media or not, so we assume the latter
			 * because it requires the larger budget. The
			 * assumption is that it is better to budget a bit more
			 * than to read the page from the media. Thus, we are
			 * setting the @PG_checked flag here.
			 */
			folio_set_checked(folio);
			skipped_read = 1;
		} else {
			err = do_readpage(folio);
			if (err) {
				folio_unlock(folio);
				folio_put(folio);
				return err;
			}
		}
	}

	err = allocate_budget(c, folio, ui, appending);
	if (unlikely(err)) {
		ubifs_assert(c, err == -ENOSPC);
		/*
		 * If we skipped reading the page because we were going to
		 * write all of it, then it is not up to date.
		 */
		if (skipped_read)
			folio_clear_checked(folio);
		/*
		 * Budgeting failed which means it would have to force
		 * write-back but didn't, because we set the @fast flag in the
		 * request. Write-back cannot be done now, while we have the
		 * page locked, because it would deadlock. Unlock and free
		 * everything and fall back to the slow path.
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		folio_unlock(folio);
		folio_put(folio);

		return write_begin_slow(mapping, pos, len, foliop);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*foliop = folio;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @folio: folio to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct folio *folio,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!folio->private) {
		if (folio_test_checked(folio))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct folio *folio, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, folio->index, len, copied, inode->i_size);

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * VFS copied less data to the folio than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the folio was not up-to-date,
		 * 'ubifs_write_begin()' did not load it from the media (for
		 * optimization reasons). This means that part of the folio
		 * contains garbage. So read the folio now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, folio, ui, appending);
		folio_clear_checked(folio);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(folio);
		goto out;
	}

	if (len == folio_size(folio))
		folio_mark_uptodate(folio);

	if (!folio->private) {
		folio_attach_private(folio, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		filemap_dirty_folio(mapping, folio);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * We do not set @I_DIRTY_PAGES (which means that
		 * the inode has dirty pages), this was done in
		 * filemap_dirty_folio().
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @folio: folio
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * Returns: %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct folio *folio,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	size_t offset = 0;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, folio->index, i_size, folio->flags);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || folio->index > end_index) {
		hole = 1;
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_hole;
	}

	page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (IS_ENCRYPTED(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen, page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress_folio(c, &dn->data, dlen, folio,
						     offset, &out_len,
						     le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				folio_zero_range(folio, offset + len,
						 UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		offset += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == folio->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			folio_zero_range(folio, len, read - len);
	}

out_hole:
	if (hole) {
		folio_set_checked(folio);
		dbg_gen("hole");
	}

	folio_mark_uptodate(folio);
	*n = nn;
	return 0;

out_err:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}
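
/*
 * Editorial sketch of how 'populate_page()' locates a data node in the
 * bulk-read buffer (hypothetical offsets): 'ubifs_tnc_bulk_read()' reads the
 * flash range covering all matched zbranches into @bu->buf, so a node is
 * found by its on-flash offset relative to the first one:
 *
 *	zbranch[0].offs == 1024, zbranch[2].offs == 1536
 *	=> dn = bu->buf + (1536 - 1024) == bu->buf + 512
 */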

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @folio1: first folio to read
 *
 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct folio *folio1)
{
	pgoff_t offset = folio1->index, end_index;
	struct address_space *mapping = folio1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;

	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, folio1, bu, &n);
	if (err)
		goto out_warn;

	folio_unlock(folio1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct folio *folio;

		if (page_offset > end_index)
			break;
		folio = __filemap_get_folio(mapping, page_offset,
				 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
				 ra_gfp_mask);
		if (IS_ERR(folio))
			break;
		if (!folio_test_uptodate(folio))
			err = populate_page(c, folio, bu, &n);
		folio_unlock(folio);
		folio_put(folio);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @folio: folio from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB.
 *
 * Returns: %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = folio->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, folio);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}
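
/*
 * Editorial example of the sequential-read heuristic above (hypothetical
 * indices): after a seek, a read of page 7 resets @read_in_a_row to 1; the
 * following read of page 8 bumps it to 2, and page 9 reaches 3, so
 * @bulk_read is switched on and page 9 is served through
 * 'ubifs_do_bulk_read()'. A later jump to, say, page 42 switches bulk-read
 * off again.
 */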

static int ubifs_read_folio(struct file *file, struct folio *folio)
{
	if (ubifs_bulk_read(folio))
		return 0;
	do_readpage(folio);
	folio_unlock(folio);
	return 0;
}

static int do_writepage(struct folio *folio, size_t len)
{
	int err = 0, blen;
	unsigned int block;
	size_t offset = 0;
	union ubifs_key key;
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);
	spin_lock(&ui->ui_lock);
	ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	folio_start_writeback(folio);

	block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	for (;;) {
		blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, folio, offset, blen);
		if (err)
			break;
		len -= blen;
		if (!len)
			break;
		block += 1;
		offset += blen;
	}
	if (err) {
		mapping_set_error(folio->mapping, err);
		ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
			  folio->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(c, folio->private != NULL);
	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);

	folio_unlock(folio);
	folio_end_writeback(folio);
	return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 *
 * An explanation of some locking issues. 'ubifs_writepage()' is first called
 * with the page locked, and it locks @ui_mutex. However, write-back does not
 * take inode @i_mutex, which means other VFS operations may be run on this
 * inode at the same time. And the problematic one is truncation to a smaller
 * size, from where we have to call 'truncate_setsize()', which first changes
 * @inode->i_size, then drops the truncated pages. And while dropping the
 * pages, it takes the page lock. This means that 'do_truncation()' cannot call
 * 'truncate_setsize()' with @ui_mutex locked, because it would deadlock with
 * 'ubifs_writepage()'. This means that @inode->i_size is changed while
 * @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore,
 * and the calls to truncate_setsize can be moved around freely.  They should
 * be moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * the inode size. How do we do this if @inode->i_size may become smaller while
 * we are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would block on
 * the page lock and would not write the truncated inode node to the journal
 * before we have finished.
 */
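
/*
 * Editorial worked example of the rule above (hypothetical sizes): the
 * on-flash inode was last synchronized at @synced_i_size == 64KiB while the
 * in-memory size has grown to 1MiB. A dirty page at offset 512KiB lies
 * beyond the synchronized size, so 'ubifs_writepage()' first calls
 * '->write_inode()' to push the 1MiB size to the media and only then writes
 * the page; after an unclean reboot no indexed page can lie beyond the
 * recorded inode size.
 */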
static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc,
		void *data)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	int err, len = folio_size(folio);

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, folio->index, folio->flags);
	ubifs_assert(c, folio->private != NULL);

	/* Is the folio fully outside @i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the folio fully inside @i_size? */
	if (folio_pos(folio) + len <= i_size) {
		if (folio_pos(folio) + len > synced_i_size) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_redirty;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would deal
			 * with this.
			 */
		}
		return do_writepage(folio, len);
	}

	/*
	 * The folio straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	len = i_size - folio_pos(folio);
	folio_zero_segment(folio, len, folio_size(folio));

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_redirty;
	}

	return do_writepage(folio, len);
out_redirty:
	/*
	 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
	 * there is no need to do space budget for dirty inode.
	 */
	folio_redirty_for_writepage(wbc, folio);
out_unlock:
	folio_unlock(folio);
	return err;
}

static int ubifs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, ubifs_writepage, NULL);
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode_set_atime_to_ts(inode, attr->ia_atime);
	if (attr->ia_valid & ATTR_MTIME)
		inode_set_mtime_to_ts(inode, attr->ia_mtime);
	if (attr->ia_valid & ATTR_CTIME)
		inode_set_ctime_to_ts(inode, attr->ia_ctime);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size.
 *
 * Returns: %0 in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for the truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct folio *folio;

		folio = filemap_lock_folio(inode->i_mapping, index);
		if (!IS_ERR(folio)) {
			if (folio_test_dirty(folio)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(c, folio->private != NULL);

				folio_clear_dirty_for_io(folio);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = offset_in_folio(folio,
							new_size);
				err = do_writepage(folio, offset);
				folio_put(folio);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				folio_unlock(folio);
				folio_put(folio);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}
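
/*
 * Editorial worked example for the partial-block handling above (hypothetical
 * size, assuming UBIFS_BLOCK_SIZE == 4096): truncating to new_size == 10000
 * gives offset == 10000 & 4095 == 1808, so the cut does not fall on a block
 * boundary, req.dirtied_page is budgeted, and the last data block is
 * re-written to hold only its first 1808 bytes.
 */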

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size.
 *
 * Returns: %0 in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, folio_test_private(folio));
	if (offset || length < folio_size(folio))
		/* Partial folio remains dirty */
		return;

	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/* Synchronize the inode unless this is a 'datasync()' call. */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 *
 * Returns: %1 if time update is needed, %0 if not
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec64 *now)
{
	struct timespec64 ctime = inode_get_ctime(inode);
	struct timespec64 mtime = inode_get_mtime(inode);

	if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
		return 1;
	return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @flags: control flags determining which time fields of @inode to update
 *
 * This function updates time of the inode.
 *
 * Returns: %0 for success or a negative error code otherwise.
 */
int ubifs_update_time(struct inode *inode, int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int err, release;

	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
		generic_update_time(inode, flags);
		return 0;
	}

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	inode_update_timestamps(inode, flags);
	release = ui->dirty;
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they differ from the
 * current time.
 *
 * Returns: %0 in case of success and a negative error code in
 * case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

static bool ubifs_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	bool ret;
	struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

	ret = filemap_dirty_folio(mapping, folio);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(c, ret == false);
	return ret;
}

static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
{
	struct inode *inode = folio->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (folio_test_writeback(folio))
		return false;

	/*
	 * The folio is private but not dirty - weird? There is one scenario
	 * where this can happen: 'ubifs_writepage()' skipped the folio
	 * because its index is beyond i_size (for example, the file was
	 * truncated by another process A), and then the folio is invalidated
	 * by an fadvise64() syscall before being truncated by process A.
	 */
	ubifs_assert(c, folio_test_private(folio));
	if (folio_test_checked(folio))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	folio_detach_private(folio);
	folio_clear_checked(folio);
	return true;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure the page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
		i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @folio so far so we may budget for changing the
	 * folio. Note, we cannot do this after we locked the folio, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the folio is dirty or not, so we
	 * assume that it is not and budget for a new folio. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the folio state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be a good idea to do. Instead, we allocate
	 * budget for a new folio and amend it later on if the folio was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
				   inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	folio_lock(folio);
	if (unlikely(folio->mapping != inode->i_mapping ||
		     folio_pos(folio) >= i_size_read(inode))) {
		/* Folio got truncated out from underneath us */
		goto sigbus;
	}

	if (folio->private)
		release_new_page_budget(c);
	else {
		if (!folio_test_checked(folio))
			ubifs_convert_page_budget(c);
		folio_attach_private(folio, (void *)1);
		atomic_long_inc(&c->dirty_pg_cnt);
		filemap_dirty_folio(folio->mapping, folio);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;

sigbus:
	folio_unlock(folio);
	ubifs_release_budget(c, &req);
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.map_pages    = filemap_map_pages,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	vma->vm_ops = &ubifs_file_vm_ops;

	if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		file_accessed(file);

	return 0;
}

static const char *ubifs_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (!IS_ENCRYPTED(inode))
		return ui->data;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
}

static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
				 const struct path *path, struct kstat *stat,
				 u32 request_mask, unsigned int query_flags)
{
	ubifs_getattr(idmap, path, stat, request_mask, query_flags);

	if (IS_ENCRYPTED(d_inode(path->dentry)))
		return fscrypt_symlink_getattr(path, stat);
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.read_folio       = ubifs_read_folio,
	.writepages       = ubifs_writepages,
	.write_begin      = ubifs_write_begin,
	.write_end        = ubifs_write_end,
	.invalidate_folio = ubifs_invalidate_folio,
	.dirty_folio      = ubifs_dirty_folio,
	.migrate_folio    = filemap_migrate_folio,
	.release_folio    = ubifs_release_folio,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr      = ubifs_setattr,
	.getattr      = ubifs_getattr,
	.listxattr    = ubifs_listxattr,
	.update_time  = ubifs_update_time,
	.fileattr_get = ubifs_fileattr_get,
	.fileattr_set = ubifs_fileattr_set,
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link    = ubifs_get_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_symlink_getattr,
	.listxattr   = ubifs_listxattr,
	.update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read_iter      = generic_file_read_iter,
	.write_iter     = ubifs_write_iter,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = filemap_splice_read,
	.splice_write   = iter_file_splice_write,
	.open           = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};