1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This file is part of UBIFS.
4 *
5 * Copyright (C) 2006-2008 Nokia Corporation.
6 *
7 * Authors: Artem Bityutskiy (Битюцкий Артём)
8 * Adrian Hunter
9 */
10
11 /*
12 * This file implements VFS file and inode operations for regular files, device
13 * nodes and symlinks as well as address space operations.
14 *
15 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
16 * the page is dirty and is used for optimization purposes - dirty pages are
17 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
18 * the budget for this page. The @PG_checked flag is set if full budgeting is
19 * required for the page e.g., when it corresponds to a file hole or it is
20 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
21 * it is OK to fail in this function, and the budget is released in
22 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
23 * information about how the page was budgeted, to make it possible to release
24 * the budget properly.
25 *
26 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
27 * implement. However, this is not true for 'ubifs_writepage()', which may be
28 * called with @i_mutex unlocked. For example, when the flusher thread is doing
29 * background write-back, it calls 'ubifs_writepage()' with @i_mutex unlocked.
30 * On "normal" work-paths @i_mutex is locked in 'ubifs_writepage()', e.g. in
31 * the "sys_write -> alloc_pages -> direct reclaim" path. So, in
32 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
33 *
34 * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
35 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
36 * ondemand_readahead -> read_folio"). In the case of readahead, the @I_SYNC
37 * flag is not set either. However, UBIFS disables readahead.
38 */
39
40 #include "ubifs.h"
41 #include <linux/mount.h>
42 #include <linux/slab.h>
43 #include <linux/migrate.h>
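/*
 * A minimal sketch (illustrative only, hypothetical helper name, not
 * compiled) of how the two page flags described in the header comment
 * translate into budget release. It mirrors the logic of 'cancel_budget()'
 * and 'ubifs_invalidate_folio()' further down in this file.
 */
#if 0
static void sketch_release_folio_budget(struct ubifs_info *c,
					struct folio *folio)
{
	if (folio->private)
		/* Dirty folio - the budget is kept until write-back. */
		return;
	if (folio_test_checked(folio))
		/* Fully budgeted: a hole or beyond i_size ("new page"). */
		release_new_page_budget(c);
	else
		/* Exists on flash: the cheaper "changed page" budget. */
		release_existing_page_budget(c);
}
#endif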
44
45 static int read_block(struct inode *inode, struct folio *folio, size_t offset,
46 unsigned int block, struct ubifs_data_node *dn)
47 {
48 struct ubifs_info *c = inode->i_sb->s_fs_info;
49 int err, len, out_len;
50 union ubifs_key key;
51 unsigned int dlen;
52
53 data_key_init(c, &key, inode->i_ino, block);
54 err = ubifs_tnc_lookup(c, &key, dn);
55 if (err) {
56 if (err == -ENOENT)
57 /* Not found, so it must be a hole */
58 folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
59 return err;
60 }
61
62 ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
63 ubifs_inode(inode)->creat_sqnum);
64 len = le32_to_cpu(dn->size);
65 if (len <= 0 || len > UBIFS_BLOCK_SIZE)
66 goto dump;
67
68 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
69
70 if (IS_ENCRYPTED(inode)) {
71 err = ubifs_decrypt(inode, dn, &dlen, block);
72 if (err)
73 goto dump;
74 }
75
76 out_len = UBIFS_BLOCK_SIZE;
77 err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset,
78 &out_len, le16_to_cpu(dn->compr_type));
79 if (err || len != out_len)
80 goto dump;
81
82 /*
83 * Data length can be less than a full block, even for blocks that are
84 * not the last in the file (e.g., as a result of making a hole and
85 * appending data). Ensure that the remainder is zeroed out.
86 */
87 if (len < UBIFS_BLOCK_SIZE)
88 folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len);
89
90 return 0;
91
92 dump:
93 ubifs_err(c, "bad data node (block %u, inode %lu)",
94 block, inode->i_ino);
95 ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
96 return -EINVAL;
97 }
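/*
 * A note on the length arithmetic in 'read_block()': an on-flash data node is
 * a common header followed by up to UBIFS_BLOCK_SIZE bytes of (possibly
 * compressed) data, so the payload length is the node length minus the header
 * size. A minimal sketch (hypothetical helper, not compiled):
 */
#if 0
static unsigned int sketch_payload_len(const struct ubifs_data_node *dn)
{
	/* ch.len counts the whole node, header included. */
	return le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
}
#endif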
98
99 static int do_readpage(struct folio *folio)
100 {
101 int err = 0, i;
102 unsigned int block, beyond;
103 struct ubifs_data_node *dn = NULL;
104 struct inode *inode = folio->mapping->host;
105 struct ubifs_info *c = inode->i_sb->s_fs_info;
106 loff_t i_size = i_size_read(inode);
107 size_t offset = 0;
108
109 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
110 inode->i_ino, folio->index, i_size, folio->flags);
111 ubifs_assert(c, !folio_test_checked(folio));
112 ubifs_assert(c, !folio->private);
113
114 block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
115 beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
116 if (block >= beyond) {
117 /* Reading beyond inode */
118 folio_set_checked(folio);
119 folio_zero_range(folio, 0, folio_size(folio));
120 goto out;
121 }
122
123 dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
124 if (!dn) {
125 err = -ENOMEM;
126 goto out;
127 }
128
129 i = 0;
130 while (1) {
131 int ret;
132
133 if (block >= beyond) {
134 /* Reading beyond inode */
135 err = -ENOENT;
136 folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
137 } else {
138 ret = read_block(inode, folio, offset, block, dn);
139 if (ret) {
140 err = ret;
141 if (err != -ENOENT)
142 break;
143 } else if (block + 1 == beyond) {
144 int dlen = le32_to_cpu(dn->size);
145 int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
146
147 if (ilen && ilen < dlen)
148 folio_zero_range(folio, offset + ilen, dlen - ilen);
149 }
150 }
151 if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
152 break;
153 block += 1;
154 offset += UBIFS_BLOCK_SIZE;
155 }
156
157 if (err) {
159 if (err == -ENOENT) {
160 /* Not found, so it must be a hole */
161 folio_set_checked(folio);
162 dbg_gen("hole");
163 err = 0;
164 } else {
165 ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
166 folio->index, inode->i_ino, err);
167 }
168 }
169
170 out:
171 kfree(dn);
172 if (!err)
173 folio_mark_uptodate(folio);
174 return err;
175 }
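/*
 * For reference, the folio-to-block mapping used in 'do_readpage()': file
 * data is addressed in UBIFS_BLOCK_SIZE (4096-byte) blocks, so with 4K pages
 * an order-0 folio covers exactly one block and UBIFS_BLOCKS_PER_PAGE_SHIFT
 * is 0. A minimal sketch of the index arithmetic (hypothetical helper, not
 * compiled):
 */
#if 0
static void sketch_folio_blocks(struct folio *folio)
{
	unsigned int first = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	unsigned int count = UBIFS_BLOCKS_PER_PAGE << folio_order(folio);

	/* Blocks first .. first + count - 1 back this folio's bytes. */
	(void)first;
	(void)count;
}
#endif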
176
177 /**
178 * release_new_page_budget - release budget of a new page.
179 * @c: UBIFS file-system description object
180 *
181 * This is a helper function which releases budget corresponding to the budget
182 * of one new page of data.
183 */
184 static void release_new_page_budget(struct ubifs_info *c)
185 {
186 struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };
187
188 ubifs_release_budget(c, &req);
189 }
190
191 /**
192 * release_existing_page_budget - release budget of an existing page.
193 * @c: UBIFS file-system description object
194 *
195 * This is a helper function which releases budget corresponding to the budget
196 * of changing one page of data which already exists on the flash media.
197 */
198 static void release_existing_page_budget(struct ubifs_info *c)
199 {
200 struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};
201
202 ubifs_release_budget(c, &req);
203 }
204
205 static int write_begin_slow(struct address_space *mapping,
206 loff_t pos, unsigned len, struct folio **foliop)
207 {
208 struct inode *inode = mapping->host;
209 struct ubifs_info *c = inode->i_sb->s_fs_info;
210 pgoff_t index = pos >> PAGE_SHIFT;
211 struct ubifs_budget_req req = { .new_page = 1 };
212 int err, appending = !!(pos + len > inode->i_size);
213 struct folio *folio;
214
215 dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
216 inode->i_ino, pos, len, inode->i_size);
217
218 /*
219 * At the slow path we have to budget before locking the folio, because
220 * budgeting may force write-back, which would wait on locked folios and
221 * deadlock if we had the folio locked. At this point we do not know
222 * anything about the folio, so assume that this is a new folio which is
223 * written to a hole. This corresponds to largest budget. Later the
224 * budget will be amended if this is not true.
225 */
226 if (appending)
227 /* We are appending data, budget for inode change */
228 req.dirtied_ino = 1;
229
230 err = ubifs_budget_space(c, &req);
231 if (unlikely(err))
232 return err;
233
234 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
235 mapping_gfp_mask(mapping));
236 if (IS_ERR(folio)) {
237 ubifs_release_budget(c, &req);
238 return PTR_ERR(folio);
239 }
240
241 if (!folio_test_uptodate(folio)) {
242 if (pos == folio_pos(folio) && len >= folio_size(folio))
243 folio_set_checked(folio);
244 else {
245 err = do_readpage(folio);
246 if (err) {
247 folio_unlock(folio);
248 folio_put(folio);
249 ubifs_release_budget(c, &req);
250 return err;
251 }
252 }
253 }
254
255 if (folio->private)
256 /*
257 * The folio is dirty, which means it was budgeted twice:
258 * o first time the budget was allocated by the task which
259 * made the folio dirty and set the private field;
260 * o and then we budgeted for it for the second time at the
261 * very beginning of this function.
262 *
263 * So what we have to do is to release the folio budget we
264 * allocated.
265 */
266 release_new_page_budget(c);
267 else if (!folio_test_checked(folio))
268 /*
269 * We are changing a folio which already exists on the media.
270 * This means that changing the folio does not make the amount
271 * of indexing information larger, and this part of the budget
272 * which we have already acquired may be released.
273 */
274 ubifs_convert_page_budget(c);
275
276 if (appending) {
277 struct ubifs_inode *ui = ubifs_inode(inode);
278
279 /*
280 * 'ubifs_write_end()' is optimized from the fast-path part of
281 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
282 * if data is appended.
283 */
284 mutex_lock(&ui->ui_mutex);
285 if (ui->dirty)
286 /*
287 * The inode is dirty already, so we may free the
288 * budget we allocated.
289 */
290 ubifs_release_dirty_inode_budget(c, ui);
291 }
292
293 *foliop = folio;
294 return 0;
295 }
296
297 /**
298 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
299 * @c: UBIFS file-system description object
300 * @folio: folio to allocate budget for
301 * @ui: UBIFS inode object the page belongs to
302 * @appending: non-zero if the page is appended
303 *
304 * This is a helper function for 'ubifs_write_begin()' which allocates budget
305 * for the operation. The budget is allocated differently depending on whether
306 * this is appending, whether the page is dirty or not, and so on. This
307 * function leaves the @ui->ui_mutex locked in case of appending.
308 *
309 * Returns: %0 in case of success and %-ENOSPC in case of failure.
310 */
311 static int allocate_budget(struct ubifs_info *c, struct folio *folio,
312 struct ubifs_inode *ui, int appending)
313 {
314 struct ubifs_budget_req req = { .fast = 1 };
315
316 if (folio->private) {
317 if (!appending)
318 /*
319 * The folio is dirty and we are not appending, which
320 * means no budget is needed at all.
321 */
322 return 0;
323
324 mutex_lock(&ui->ui_mutex);
325 if (ui->dirty)
326 /*
327 * The page is dirty and we are appending, so the inode
328 * has to be marked as dirty. However, it is already
329 * dirty, so we do not need any budget. We may return,
330 * but @ui->ui_mutex has to be left locked because we
331 * should prevent write-back from flushing the inode
332 * and freeing the budget. The lock will be released in
333 * 'ubifs_write_end()'.
334 */
335 return 0;
336
337 /*
338 * The page is dirty, we are appending, the inode is clean, so
339 * we need to budget the inode change.
340 */
341 req.dirtied_ino = 1;
342 } else {
343 if (folio_test_checked(folio))
344 /*
345 * The page corresponds to a hole and does not
346 * exist on the media. So changing it makes
347 * the amount of indexing information
348 * larger, and we have to budget for a new
349 * page.
350 */
351 req.new_page = 1;
352 else
353 /*
354 * Not a hole, the change will not add any new
355 * indexing information, budget for page
356 * change.
357 */
358 req.dirtied_page = 1;
359
360 if (appending) {
361 mutex_lock(&ui->ui_mutex);
362 if (!ui->dirty)
363 /*
364 * The inode is clean but we will have to mark
365 * it as dirty because we are appending. This
366 * needs a budget.
367 */
368 req.dirtied_ino = 1;
369 }
370 }
371
372 return ubifs_budget_space(c, &req);
373 }
374
375 /*
376 * This function is called when a page of data is going to be written. Since
377 * the page of data will not necessarily go to the flash straight away, UBIFS
378 * has to reserve space on the media for it, which is done by means of
379 * budgeting.
380 *
381 * This is the hot-path of the file-system and we are trying to optimize it as
382 * much as possible. For this reason it is split into two parts - slow and fast.
383 *
384 * There are many budgeting cases:
385 * o a new page is appended - we have to budget for a new page and for
386 * changing the inode; however, if the inode is already dirty, there is
387 * no need to budget for it;
388 * o an existing clean page is changed - we have to budget for it; if the
389 * page does not exist on the media (a hole), we have to budget for a new
390 * page; otherwise, we may budget for changing an existing page; the
391 * difference between these cases is that changing an existing page does
392 * not introduce anything new to the FS indexing information, so it does
393 * not grow, and a smaller budget is acquired in this case;
394 * o an existing dirty page is changed - no need to budget at all, because
395 * the page budget was acquired earlier, when the page was marked
396 * dirty.
397 *
398 * The UBIFS budgeting sub-system may force write-back if it thinks there is
399 * no space to reserve. This imposes some locking restrictions which make it
400 * impossible to take the above cases into account up front, and hence
401 * impossible to optimize budgeting.
402 *
403 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
404 * there is plenty of flash space and the budget will be acquired quickly,
405 * without forcing write-back. The slow path does not make this assumption.
406 */
407 static int ubifs_write_begin(const struct kiocb *iocb,
408 struct address_space *mapping,
409 loff_t pos, unsigned len,
410 struct folio **foliop, void **fsdata)
411 {
412 struct inode *inode = mapping->host;
413 struct ubifs_info *c = inode->i_sb->s_fs_info;
414 struct ubifs_inode *ui = ubifs_inode(inode);
415 pgoff_t index = pos >> PAGE_SHIFT;
416 int err, appending = !!(pos + len > inode->i_size);
417 int skipped_read = 0;
418 struct folio *folio;
419
420 ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
421 ubifs_assert(c, !c->ro_media && !c->ro_mount);
422
423 if (unlikely(c->ro_error))
424 return -EROFS;
425
426 /* Try out the fast-path part first */
427 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
428 mapping_gfp_mask(mapping));
429 if (IS_ERR(folio))
430 return PTR_ERR(folio);
431
432 if (!folio_test_uptodate(folio)) {
433 /* The page is not loaded from the flash */
434 if (pos == folio_pos(folio) && len >= folio_size(folio)) {
435 /*
436 * We change the whole page, so there is no need to load
437 * it. But we do not know whether this page exists on the
438 * media or not, so we assume the latter because it requires a
439 * larger budget. The assumption is that it is better
440 * to budget a bit more than to read the page from the
441 * media. Thus, we are setting the @PG_checked flag
442 * here.
443 */
444 folio_set_checked(folio);
445 skipped_read = 1;
446 } else {
447 err = do_readpage(folio);
448 if (err) {
449 folio_unlock(folio);
450 folio_put(folio);
451 return err;
452 }
453 }
454 }
455
456 err = allocate_budget(c, folio, ui, appending);
457 if (unlikely(err)) {
458 ubifs_assert(c, err == -ENOSPC);
459 /*
460 * If we skipped reading the page because we were going to
461 * write all of it, then it is not up to date.
462 */
463 if (skipped_read)
464 folio_clear_checked(folio);
465 /*
466 * Budgeting failed which means it would have to force
467 * write-back but didn't, because we set the @fast flag in the
468 * request. Write-back cannot be done now, while we have the
469 * page locked, because it would deadlock. Unlock and free
470 * everything and fall-back to slow-path.
471 */
472 if (appending) {
473 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
474 mutex_unlock(&ui->ui_mutex);
475 }
476 folio_unlock(folio);
477 folio_put(folio);
478
479 return write_begin_slow(mapping, pos, len, foliop);
480 }
481
482 /*
483 * Whee, we acquired budgeting quickly - without involving
484 * garbage-collection, committing or forcing write-back. We return
485 * with @ui->ui_mutex locked if we are appending pages, and unlocked
486 * otherwise. This is an optimization (slightly hacky though).
487 */
488 *foliop = folio;
489 return 0;
490 }
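/*
 * To recap the contract of 'ubifs_write_begin()' and 'ubifs_write_end()'
 * below (a minimal sketch of the VFS-side flow, with a hypothetical driver
 * function; not compiled): '->write_begin()' budgets and returns a locked
 * folio, and '->write_end()' attaches the private flag, dirties the folio
 * and drops the locks.
 */
#if 0
static int sketch_buffered_write(struct kiocb *iocb,
				 struct address_space *mapping,
				 loff_t pos, unsigned len)
{
	struct folio *folio;
	void *fsdata = NULL;
	int err;

	err = ubifs_write_begin(iocb, mapping, pos, len, &folio, &fsdata);
	if (err)
		return err;

	/* ... copy @len bytes of user data into the folio at @pos ... */

	return ubifs_write_end(iocb, mapping, pos, len, len, folio, fsdata);
}
#endif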
491
492 /**
493 * cancel_budget - cancel budget.
494 * @c: UBIFS file-system description object
495 * @folio: folio to cancel budget for
496 * @ui: UBIFS inode object the page belongs to
497 * @appending: non-zero if the page is appended
498 *
499 * This is a helper function for a page write operation. It unlocks the
500 * @ui->ui_mutex in case of appending.
501 */
502 static void cancel_budget(struct ubifs_info *c, struct folio *folio,
503 struct ubifs_inode *ui, int appending)
504 {
505 if (appending) {
506 if (!ui->dirty)
507 ubifs_release_dirty_inode_budget(c, ui);
508 mutex_unlock(&ui->ui_mutex);
509 }
510 if (!folio->private) {
511 if (folio_test_checked(folio))
512 release_new_page_budget(c);
513 else
514 release_existing_page_budget(c);
515 }
516 }
517
518 static int ubifs_write_end(const struct kiocb *iocb,
519 struct address_space *mapping, loff_t pos,
520 unsigned len, unsigned copied,
521 struct folio *folio, void *fsdata)
522 {
523 struct inode *inode = mapping->host;
524 struct ubifs_inode *ui = ubifs_inode(inode);
525 struct ubifs_info *c = inode->i_sb->s_fs_info;
526 loff_t end_pos = pos + len;
527 int appending = !!(end_pos > inode->i_size);
528
529 dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
530 inode->i_ino, pos, folio->index, len, copied, inode->i_size);
531
532 if (unlikely(copied < len && !folio_test_uptodate(folio))) {
533 /*
534 * VFS copied less data to the folio than it intended and
535 * declared in its '->write_begin()' call via the @len
536 * argument. If the folio was not up-to-date,
537 * the 'ubifs_write_begin()' function did
538 * not load it from the media (for optimization reasons). This
539 * means that part of the folio contains garbage. So read the
540 * folio now.
541 */
542 dbg_gen("copied %d instead of %d, read page and repeat",
543 copied, len);
544 cancel_budget(c, folio, ui, appending);
545 folio_clear_checked(folio);
546
547 /*
548 * Return 0 to force VFS to repeat the whole operation, or the
549 * error code if 'do_readpage()' fails.
550 */
551 copied = do_readpage(folio);
552 goto out;
553 }
554
555 if (len == folio_size(folio))
556 folio_mark_uptodate(folio);
557
558 if (!folio->private) {
559 folio_attach_private(folio, (void *)1);
560 atomic_long_inc(&c->dirty_pg_cnt);
561 filemap_dirty_folio(mapping, folio);
562 }
563
564 if (appending) {
565 i_size_write(inode, end_pos);
566 ui->ui_size = end_pos;
567 /*
568 * We do not set @I_DIRTY_PAGES (which means that
569 * the inode has dirty pages), this was done in
570 * filemap_dirty_folio().
571 */
572 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
573 ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
574 mutex_unlock(&ui->ui_mutex);
575 }
576
577 out:
578 folio_unlock(folio);
579 folio_put(folio);
580 return copied;
581 }
582
583 /**
584 * populate_page - copy data nodes into a page for bulk-read.
585 * @c: UBIFS file-system description object
586 * @folio: folio
587 * @bu: bulk-read information
588 * @n: next zbranch slot
589 *
590 * Returns: %0 on success and a negative error code on failure.
591 */
592 static int populate_page(struct ubifs_info *c, struct folio *folio,
593 struct bu_info *bu, int *n)
594 {
595 int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
596 struct inode *inode = folio->mapping->host;
597 loff_t i_size = i_size_read(inode);
598 unsigned int page_block;
599 size_t offset = 0;
600 pgoff_t end_index;
601
602 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
603 inode->i_ino, folio->index, i_size, folio->flags);
604
605 end_index = (i_size - 1) >> PAGE_SHIFT;
606 if (!i_size || folio->index > end_index) {
607 hole = 1;
608 folio_zero_range(folio, 0, folio_size(folio));
609 goto out_hole;
610 }
611
612 page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
613 while (1) {
614 int err, len, out_len, dlen;
615
616 if (nn >= bu->cnt) {
617 hole = 1;
618 folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
619 } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
620 struct ubifs_data_node *dn;
621
622 dn = bu->buf + (bu->zbranch[nn].offs - offs);
623
624 ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
625 ubifs_inode(inode)->creat_sqnum);
626
627 len = le32_to_cpu(dn->size);
628 if (len <= 0 || len > UBIFS_BLOCK_SIZE)
629 goto out_err;
630
631 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
632 out_len = UBIFS_BLOCK_SIZE;
633
634 if (IS_ENCRYPTED(inode)) {
635 err = ubifs_decrypt(inode, dn, &dlen, page_block);
636 if (err)
637 goto out_err;
638 }
639
640 err = ubifs_decompress_folio(
641 c, &dn->data, dlen, folio, offset, &out_len,
642 le16_to_cpu(dn->compr_type));
643 if (err || len != out_len)
644 goto out_err;
645
646 if (len < UBIFS_BLOCK_SIZE)
647 folio_zero_range(folio, offset + len,
648 UBIFS_BLOCK_SIZE - len);
649
650 nn += 1;
651 read = (i << UBIFS_BLOCK_SHIFT) + len;
652 } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
653 nn += 1;
654 continue;
655 } else {
656 hole = 1;
657 folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
658 }
659 if (++i >= UBIFS_BLOCKS_PER_PAGE)
660 break;
661 offset += UBIFS_BLOCK_SIZE;
662 page_block += 1;
663 }
664
665 if (end_index == folio->index) {
666 int len = i_size & (PAGE_SIZE - 1);
667
668 if (len && len < read)
669 folio_zero_range(folio, len, read - len);
670 }
671
672 out_hole:
673 if (hole) {
674 folio_set_checked(folio);
675 dbg_gen("hole");
676 }
677
678 folio_mark_uptodate(folio);
679 *n = nn;
680 return 0;
681
682 out_err:
683 ubifs_err(c, "bad data node (block %u, inode %lu)",
684 page_block, inode->i_ino);
685 return -EINVAL;
686 }
687
688 /**
689 * ubifs_do_bulk_read - do bulk-read.
690 * @c: UBIFS file-system description object
691 * @bu: bulk-read information
692 * @folio1: first folio to read
693 *
694 * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
695 */
696 static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
697 struct folio *folio1)
698 {
699 pgoff_t offset = folio1->index, end_index;
700 struct address_space *mapping = folio1->mapping;
701 struct inode *inode = mapping->host;
702 struct ubifs_inode *ui = ubifs_inode(inode);
703 int err, page_idx, page_cnt, ret = 0, n = 0;
704 int allocate = bu->buf ? 0 : 1;
705 loff_t isize;
706 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
707
708 err = ubifs_tnc_get_bu_keys(c, bu);
709 if (err)
710 goto out_warn;
711
712 if (bu->eof) {
713 /* Turn off bulk-read at the end of the file */
714 ui->read_in_a_row = 1;
715 ui->bulk_read = 0;
716 }
717
718 page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
719 if (!page_cnt) {
720 /*
721 * This happens when there are multiple blocks per page and the
722 * blocks for the first page we are looking for are not
723 * together. If all the pages were like this, bulk-read would
724 * reduce performance, so we turn it off for a while.
725 */
726 goto out_bu_off;
727 }
728
729 if (bu->cnt) {
730 if (allocate) {
731 /*
732 * Allocate bulk-read buffer depending on how many data
733 * nodes we are going to read.
734 */
735 bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
736 bu->zbranch[bu->cnt - 1].len -
737 bu->zbranch[0].offs;
738 ubifs_assert(c, bu->buf_len > 0);
739 ubifs_assert(c, bu->buf_len <= c->leb_size);
740 bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
741 if (!bu->buf)
742 goto out_bu_off;
743 }
744
745 err = ubifs_tnc_bulk_read(c, bu);
746 if (err)
747 goto out_warn;
748 }
749
750 err = populate_page(c, folio1, bu, &n);
751 if (err)
752 goto out_warn;
753
754 folio_unlock(folio1);
755 ret = 1;
756
757 isize = i_size_read(inode);
758 if (isize == 0)
759 goto out_free;
760 end_index = ((isize - 1) >> PAGE_SHIFT);
761
762 for (page_idx = 1; page_idx < page_cnt; page_idx++) {
763 pgoff_t page_offset = offset + page_idx;
764 struct folio *folio;
765
766 if (page_offset > end_index)
767 break;
768 folio = __filemap_get_folio(mapping, page_offset,
769 FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
770 ra_gfp_mask);
771 if (IS_ERR(folio))
772 break;
773 if (!folio_test_uptodate(folio))
774 err = populate_page(c, folio, bu, &n);
775 folio_unlock(folio);
776 folio_put(folio);
777 if (err)
778 break;
779 }
780
781 ui->last_page_read = offset + page_idx - 1;
782
783 out_free:
784 if (allocate)
785 kfree(bu->buf);
786 return ret;
787
788 out_warn:
789 ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
790 goto out_free;
791
792 out_bu_off:
793 ui->read_in_a_row = ui->bulk_read = 0;
794 goto out_free;
795 }
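/*
 * The bulk-read buffer sizing in 'ubifs_do_bulk_read()' relies on the
 * zbranches being ordered by LEB offset: the buffer must span from the start
 * of the first data node to the end of the last one, including any gaps
 * between nodes. A minimal worked sketch (hypothetical helper, not compiled):
 */
#if 0
static int sketch_bu_buf_len(const struct bu_info *bu)
{
	/*
	 * E.g. nodes at offsets 4096 (len 160) and 8192 (len 4256) need
	 * 8192 + 4256 - 4096 = 8352 bytes of buffer.
	 */
	return bu->zbranch[bu->cnt - 1].offs +
	       bu->zbranch[bu->cnt - 1].len -
	       bu->zbranch[0].offs;
}
#endif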
796
797 /**
798 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
799 * @folio: folio from which to start bulk-read.
800 *
801 * Some flash media are capable of reading sequentially at faster rates. UBIFS
802 * bulk-read facility is designed to take advantage of that, by reading in one
803 * go consecutive data nodes that are also located consecutively in the same
804 * LEB.
805 *
806 * Returns: %1 if a bulk-read is done and %0 otherwise.
807 */
808 static int ubifs_bulk_read(struct folio *folio)
809 {
810 struct inode *inode = folio->mapping->host;
811 struct ubifs_info *c = inode->i_sb->s_fs_info;
812 struct ubifs_inode *ui = ubifs_inode(inode);
813 pgoff_t index = folio->index, last_page_read = ui->last_page_read;
814 struct bu_info *bu;
815 int err = 0, allocated = 0;
816
817 ui->last_page_read = index;
818 if (!c->bulk_read)
819 return 0;
820
821 /*
822 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
823 * so don't bother if we cannot lock the mutex.
824 */
825 if (!mutex_trylock(&ui->ui_mutex))
826 return 0;
827
828 if (index != last_page_read + 1) {
829 /* Turn off bulk-read if we stop reading sequentially */
830 ui->read_in_a_row = 1;
831 if (ui->bulk_read)
832 ui->bulk_read = 0;
833 goto out_unlock;
834 }
835
836 if (!ui->bulk_read) {
837 ui->read_in_a_row += 1;
838 if (ui->read_in_a_row < 3)
839 goto out_unlock;
840 /* Three reads in a row, so switch on bulk-read */
841 ui->bulk_read = 1;
842 }
843
844 /*
845 * If possible, try to use pre-allocated bulk-read information, which
846 * is protected by @c->bu_mutex.
847 */
848 if (mutex_trylock(&c->bu_mutex))
849 bu = &c->bu;
850 else {
851 bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
852 if (!bu)
853 goto out_unlock;
854
855 bu->buf = NULL;
856 allocated = 1;
857 }
858
859 bu->buf_len = c->max_bu_buf_len;
860 data_key_init(c, &bu->key, inode->i_ino,
861 folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
862 err = ubifs_do_bulk_read(c, bu, folio);
863
864 if (!allocated)
865 mutex_unlock(&c->bu_mutex);
866 else
867 kfree(bu);
868
869 out_unlock:
870 mutex_unlock(&ui->ui_mutex);
871 return err;
872 }
873
874 static int ubifs_read_folio(struct file *file, struct folio *folio)
875 {
876 if (ubifs_bulk_read(folio))
877 return 0;
878 do_readpage(folio);
879 folio_unlock(folio);
880 return 0;
881 }
882
883 static int do_writepage(struct folio *folio, size_t len)
884 {
885 int err = 0, blen;
886 unsigned int block;
887 size_t offset = 0;
888 union ubifs_key key;
889 struct inode *inode = folio->mapping->host;
890 struct ubifs_info *c = inode->i_sb->s_fs_info;
891
892 #ifdef UBIFS_DEBUG
893 struct ubifs_inode *ui = ubifs_inode(inode);
894 spin_lock(&ui->ui_lock);
895 ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
896 spin_unlock(&ui->ui_lock);
897 #endif
898
899 folio_start_writeback(folio);
900
901 block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
902 for (;;) {
903 blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
904 data_key_init(c, &key, inode->i_ino, block);
905 err = ubifs_jnl_write_data(c, inode, &key, folio, offset, blen);
906 if (err)
907 break;
908 len -= blen;
909 if (!len)
910 break;
911 block += 1;
912 offset += blen;
913 }
914 if (err) {
915 mapping_set_error(folio->mapping, err);
916 ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
917 folio->index, inode->i_ino, err);
918 ubifs_ro_mode(c, err);
919 }
920
921 ubifs_assert(c, folio->private != NULL);
922 if (folio_test_checked(folio))
923 release_new_page_budget(c);
924 else
925 release_existing_page_budget(c);
926
927 atomic_long_dec(&c->dirty_pg_cnt);
928 folio_detach_private(folio);
929 folio_clear_checked(folio);
930
931 folio_unlock(folio);
932 folio_end_writeback(folio);
933 return err;
934 }
935
936 /*
937 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
938 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
939 * situation when we have an inode with size 0, then a megabyte of data is
940 * appended to the inode, then write-back starts and flushes some amount of the
941 * dirty pages, the journal becomes full, commit happens and finishes, and then
942 * an unclean reboot happens. When the file system is mounted next time, the
943 * inode size would still be 0, but there would be many pages which are beyond
944 * the inode size; they would be indexed and consume flash space. Because the
945 * journal has been committed, the replay would not be able to detect this
946 * situation and correct the inode size. This means UBIFS would have to scan
947 * the whole index and correct all inode sizes, which is long and unacceptable.
948 *
949 * To prevent situations like this, UBIFS writes pages back only if they are
950 * within the last synchronized inode size, i.e. the size which has been
951 * written to the flash media last time. Otherwise, UBIFS forces inode
952 * write-back, thus making sure the on-flash inode contains current inode size,
953 * and then keeps writing pages back.
954 *
955 * An explanation of some locking issues. 'ubifs_writepage()' is first called
956 * with the page locked, and it locks @ui_mutex. However, write-back does not
957 * take @i_mutex, which means other VFS operations may run on this inode at
958 * the same time. The problematic one is truncation to a smaller size, where
959 * we have to call 'truncate_setsize()', which first changes @inode->i_size,
960 * then drops the truncated pages. And while dropping the pages, it takes the
961 * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()'
962 * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'.
963 * This means that @inode->i_size is changed while @ui_mutex is unlocked.
964 *
965 * XXX(truncate): with the new truncate sequence this is not true anymore,
966 * and the calls to truncate_setsize can be moved around freely. They should
967 * be moved to the very end of the truncate sequence.
968 *
969 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
970 * inode size. How do we do this if @inode->i_size may become smaller while we
971 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
972 * @ui->ui_size "shadow" field, which UBIFS uses instead of @inode->i_size
973 * internally and updates it under @ui_mutex.
974 *
975 * Q: why do we not worry that if we race with truncation, we may end up in a
976 * situation where the inode is truncated while we are in the middle of
977 * 'do_writepage()', so that we write beyond the inode size?
978 * A: if we are in the middle of 'do_writepage()', truncation would block on
979 * the page lock and would not write the truncated inode node to the
980 * journal before we have finished.
981 */
982 static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc)
983 {
984 struct inode *inode = folio->mapping->host;
985 struct ubifs_info *c = inode->i_sb->s_fs_info;
986 struct ubifs_inode *ui = ubifs_inode(inode);
987 loff_t i_size = i_size_read(inode), synced_i_size;
988 int err, len = folio_size(folio);
989
990 dbg_gen("ino %lu, pg %lu, pg flags %#lx",
991 inode->i_ino, folio->index, folio->flags);
992 ubifs_assert(c, folio->private != NULL);
993
994 /* Is the folio fully outside @i_size? (truncate in progress) */
995 if (folio_pos(folio) >= i_size) {
996 err = 0;
997 goto out_unlock;
998 }
999
1000 spin_lock(&ui->ui_lock);
1001 synced_i_size = ui->synced_i_size;
1002 spin_unlock(&ui->ui_lock);
1003
1004 /* Is the folio fully inside i_size? */
1005 if (folio_pos(folio) + len <= i_size) {
1006 if (folio_pos(folio) + len > synced_i_size) {
1007 err = inode->i_sb->s_op->write_inode(inode, NULL);
1008 if (err)
1009 goto out_redirty;
1010 /*
1011 * The inode has been written, but the write-buffer has
1012 * not been synchronized, so in case of an unclean
1013 * reboot we may end up with some pages beyond inode
1014 * size, but they would be in the journal (because
1015 * commit flushes write buffers) and recovery would deal
1016 * with this.
1017 */
1018 }
1019 return do_writepage(folio, len);
1020 }
1021
1022 /*
1023 * The folio straddles @i_size. It must be zeroed out on each and every
1024 * writepage invocation because it may be mmapped. "A file is mapped
1025 * in multiples of the page size. For a file that is not a multiple of
1026 * the page size, the remaining memory is zeroed when mapped, and
1027 * writes to that region are not written out to the file."
1028 */
1029 len = i_size - folio_pos(folio);
1030 folio_zero_segment(folio, len, folio_size(folio));
1031
1032 if (i_size > synced_i_size) {
1033 err = inode->i_sb->s_op->write_inode(inode, NULL);
1034 if (err)
1035 goto out_redirty;
1036 }
1037
1038 return do_writepage(folio, len);
1039 out_redirty:
1040 /*
1041 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
1042 * it passes the I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
1043 * there is no need to budget space for the dirty inode.
1044 */
1045 folio_redirty_for_writepage(wbc, folio);
1046 out_unlock:
1047 folio_unlock(folio);
1048 return err;
1049 }
1050
1051 static int ubifs_writepages(struct address_space *mapping,
1052 struct writeback_control *wbc)
1053 {
1054 struct folio *folio = NULL;
1055 int error;
1056
1057 while ((folio = writeback_iter(mapping, wbc, folio, &error)))
1058 error = ubifs_writepage(folio, wbc);
1059 return error;
1060 }
1061
1062 /**
1063 * do_attr_changes - change inode attributes.
1064 * @inode: inode to change attributes for
1065 * @attr: describes attributes to change
1066 */
1067 static void do_attr_changes(struct inode *inode, const struct iattr *attr)
1068 {
1069 if (attr->ia_valid & ATTR_UID)
1070 inode->i_uid = attr->ia_uid;
1071 if (attr->ia_valid & ATTR_GID)
1072 inode->i_gid = attr->ia_gid;
1073 if (attr->ia_valid & ATTR_ATIME)
1074 inode_set_atime_to_ts(inode, attr->ia_atime);
1075 if (attr->ia_valid & ATTR_MTIME)
1076 inode_set_mtime_to_ts(inode, attr->ia_mtime);
1077 if (attr->ia_valid & ATTR_CTIME)
1078 inode_set_ctime_to_ts(inode, attr->ia_ctime);
1079 if (attr->ia_valid & ATTR_MODE) {
1080 umode_t mode = attr->ia_mode;
1081
1082 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
1083 mode &= ~S_ISGID;
1084 inode->i_mode = mode;
1085 }
1086 }
1087
1088 /**
1089 * do_truncation - truncate an inode.
1090 * @c: UBIFS file-system description object
1091 * @inode: inode to truncate
1092 * @attr: inode attribute changes description
1093 *
1094 * This function implements VFS '->setattr()' call when the inode is truncated
1095 * to a smaller size.
1096 *
1097 * Returns: %0 in case of success and a negative error code
1098 * in case of failure.
1099 */
1100 static int do_truncation(struct ubifs_info *c, struct inode *inode,
1101 const struct iattr *attr)
1102 {
1103 int err;
1104 struct ubifs_budget_req req;
1105 loff_t old_size = inode->i_size, new_size = attr->ia_size;
1106 int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
1107 struct ubifs_inode *ui = ubifs_inode(inode);
1108
1109 dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
1110 memset(&req, 0, sizeof(struct ubifs_budget_req));
1111
1112 /*
1113 * If this is truncation to a smaller size, and we do not truncate on a
1114 * block boundary, budget for changing one data block, because the last
1115 * block will be re-written.
1116 */
1117 if (new_size & (UBIFS_BLOCK_SIZE - 1))
1118 req.dirtied_page = 1;
1119
1120 req.dirtied_ino = 1;
1121 /* A funny way to budget for truncation node */
1122 req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
1123 err = ubifs_budget_space(c, &req);
1124 if (err) {
1125 /*
1126 * Treat truncations to zero as deletion and always allow them,
1127 * just like we do for '->unlink()'.
1128 */
1129 if (new_size || err != -ENOSPC)
1130 return err;
1131 budgeted = 0;
1132 }
1133
1134 truncate_setsize(inode, new_size);
1135
1136 if (offset) {
1137 pgoff_t index = new_size >> PAGE_SHIFT;
1138 struct folio *folio;
1139
1140 folio = filemap_lock_folio(inode->i_mapping, index);
1141 if (!IS_ERR(folio)) {
1142 if (folio_test_dirty(folio)) {
1143 /*
1144 * 'ubifs_jnl_truncate()' will try to truncate
1145 * the last data node, but it contains
1146 * out-of-date data because the page is dirty.
1147 * Write the page now, so that
1148 * 'ubifs_jnl_truncate()' will see an already
1149 * truncated (and up to date) data node.
1150 */
1151 ubifs_assert(c, folio->private != NULL);
1152
1153 folio_clear_dirty_for_io(folio);
1154 if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
1155 offset = offset_in_folio(folio,
1156 new_size);
1157 err = do_writepage(folio, offset);
1158 folio_put(folio);
1159 if (err)
1160 goto out_budg;
1161 /*
1162 * We could now tell 'ubifs_jnl_truncate()' not
1163 * to read the last block.
1164 */
1165 } else {
1166 /*
1167 * We could 'kmap()' the page and pass the data
1168 * to 'ubifs_jnl_truncate()' to save it from
1169 * having to read it.
1170 */
1171 folio_unlock(folio);
1172 folio_put(folio);
1173 }
1174 }
1175 }
1176
1177 mutex_lock(&ui->ui_mutex);
1178 ui->ui_size = inode->i_size;
1179 /* Truncation changes inode [mc]time */
1180 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1181 /* Other attributes may be changed at the same time as well */
1182 do_attr_changes(inode, attr);
1183 err = ubifs_jnl_truncate(c, inode, old_size, new_size);
1184 mutex_unlock(&ui->ui_mutex);
1185
1186 out_budg:
1187 if (budgeted)
1188 ubifs_release_budget(c, &req);
1189 else {
1190 c->bi.nospace = c->bi.nospace_rp = 0;
1191 smp_wmb();
1192 }
1193 return err;
1194 }
1195
1196 /**
1197 * do_setattr - change inode attributes.
1198 * @c: UBIFS file-system description object
1199 * @inode: inode to change attributes for
1200 * @attr: inode attribute changes description
1201 *
1202 * This function implements VFS '->setattr()' call for all cases except
1203 * truncations to smaller size.
1204 *
1205 * Returns: %0 in case of success and a negative
1206 * error code in case of failure.
1207 */
1208 static int do_setattr(struct ubifs_info *c, struct inode *inode,
1209 const struct iattr *attr)
1210 {
1211 int err, release;
1212 loff_t new_size = attr->ia_size;
1213 struct ubifs_inode *ui = ubifs_inode(inode);
1214 struct ubifs_budget_req req = { .dirtied_ino = 1,
1215 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1216
1217 err = ubifs_budget_space(c, &req);
1218 if (err)
1219 return err;
1220
1221 if (attr->ia_valid & ATTR_SIZE) {
1222 dbg_gen("size %lld -> %lld", inode->i_size, new_size);
1223 truncate_setsize(inode, new_size);
1224 }
1225
1226 mutex_lock(&ui->ui_mutex);
1227 if (attr->ia_valid & ATTR_SIZE) {
1228 /* Truncation changes inode [mc]time */
1229 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1230 /* 'truncate_setsize()' changed @i_size, update @ui_size */
1231 ui->ui_size = inode->i_size;
1232 }
1233
1234 do_attr_changes(inode, attr);
1235
1236 release = ui->dirty;
1237 if (attr->ia_valid & ATTR_SIZE)
1238 /*
1239 * Inode length changed, so we have to make sure
1240 * @I_DIRTY_DATASYNC is set.
1241 */
1242 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1243 else
1244 mark_inode_dirty_sync(inode);
1245 mutex_unlock(&ui->ui_mutex);
1246
1247 if (release)
1248 ubifs_release_budget(c, &req);
1249 if (IS_SYNC(inode))
1250 err = inode->i_sb->s_op->write_inode(inode, NULL);
1251 return err;
1252 }
1253
1254 int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1255 struct iattr *attr)
1256 {
1257 int err;
1258 struct inode *inode = d_inode(dentry);
1259 struct ubifs_info *c = inode->i_sb->s_fs_info;
1260
1261 dbg_gen("ino %lu, mode %#x, ia_valid %#x",
1262 inode->i_ino, inode->i_mode, attr->ia_valid);
1263 err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1264 if (err)
1265 return err;
1266
1267 err = dbg_check_synced_i_size(c, inode);
1268 if (err)
1269 return err;
1270
1271 err = fscrypt_prepare_setattr(dentry, attr);
1272 if (err)
1273 return err;
1274
1275 if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
1276 /* Truncation to a smaller size */
1277 err = do_truncation(c, inode, attr);
1278 else
1279 err = do_setattr(c, inode, attr);
1280
1281 return err;
1282 }
1283
1284 static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
1285 size_t length)
1286 {
1287 struct inode *inode = folio->mapping->host;
1288 struct ubifs_info *c = inode->i_sb->s_fs_info;
1289
1290 ubifs_assert(c, folio_test_private(folio));
1291 if (offset || length < folio_size(folio))
1292 /* Partial folio remains dirty */
1293 return;
1294
1295 if (folio_test_checked(folio))
1296 release_new_page_budget(c);
1297 else
1298 release_existing_page_budget(c);
1299
1300 atomic_long_dec(&c->dirty_pg_cnt);
1301 folio_detach_private(folio);
1302 folio_clear_checked(folio);
1303 }
1304
1305 int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1306 {
1307 struct inode *inode = file->f_mapping->host;
1308 struct ubifs_info *c = inode->i_sb->s_fs_info;
1309 int err;
1310
1311 dbg_gen("syncing inode %lu", inode->i_ino);
1312
1313 if (c->ro_mount)
1314 /*
1315 * For some really strange reason the VFS does not filter out
1316 * 'fsync()' for R/O mounted file-systems (as of 2.6.39).
1317 */
1318 return 0;
1319
1320 err = file_write_and_wait_range(file, start, end);
1321 if (err)
1322 return err;
1323 inode_lock(inode);
1324
1325 /* Synchronize the inode unless this is a 'datasync()' call. */
1326 if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
1327 err = inode->i_sb->s_op->write_inode(inode, NULL);
1328 if (err)
1329 goto out;
1330 }
1331
1332 /*
1333 * Nodes related to this inode may still sit in a write-buffer. Flush
1334 * them.
1335 */
1336 err = ubifs_sync_wbufs_by_inode(c, inode);
1337 out:
1338 inode_unlock(inode);
1339 return err;
1340 }
1341
1342 /**
1343 * mctime_update_needed - check if mtime or ctime update is needed.
1344 * @inode: the inode to do the check for
1345 * @now: current time
1346 *
1347 * This helper function checks if the inode mtime/ctime should be updated or
1348 * not. If current values of the time-stamps are within the UBIFS inode time
1349 * granularity, they are not updated. This is an optimization.
1350 *
1351 * Returns: %1 if time update is needed, %0 if not
1352 */
1353 static inline int mctime_update_needed(const struct inode *inode,
1354 const struct timespec64 *now)
1355 {
1356 struct timespec64 ctime = inode_get_ctime(inode);
1357 struct timespec64 mtime = inode_get_mtime(inode);
1358
1359 if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
1360 return 1;
1361 return 0;
1362 }
1363
1364 /**
1365 * ubifs_update_time - update time of inode.
1366 * @inode: inode to update
1367 * @flags: time updating control flag determines updating
1368 * which time fields of @inode
1369 *
1370 * This function updates time of the inode.
1371 *
1372 * Returns: %0 for success or a negative error code otherwise.
1373 */
1374 int ubifs_update_time(struct inode *inode, int flags)
1375 {
1376 struct ubifs_inode *ui = ubifs_inode(inode);
1377 struct ubifs_info *c = inode->i_sb->s_fs_info;
1378 struct ubifs_budget_req req = { .dirtied_ino = 1,
1379 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1380 int err, release;
1381
1382 if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
1383 generic_update_time(inode, flags);
1384 return 0;
1385 }
1386
1387 err = ubifs_budget_space(c, &req);
1388 if (err)
1389 return err;
1390
1391 mutex_lock(&ui->ui_mutex);
1392 inode_update_timestamps(inode, flags);
1393 release = ui->dirty;
1394 __mark_inode_dirty(inode, I_DIRTY_SYNC);
1395 mutex_unlock(&ui->ui_mutex);
1396 if (release)
1397 ubifs_release_budget(c, &req);
1398 return 0;
1399 }
1400
1401 /**
1402 * update_mctime - update mtime and ctime of an inode.
1403 * @inode: inode to update
1404 *
1405 * This function updates the mtime and ctime of the inode if they are not
1406 * equal to the current time.
1407 *
1408 * Returns: %0 in case of success and a negative error code in
1409 * case of failure.
1410 */
1411 static int update_mctime(struct inode *inode)
1412 {
1413 struct timespec64 now = current_time(inode);
1414 struct ubifs_inode *ui = ubifs_inode(inode);
1415 struct ubifs_info *c = inode->i_sb->s_fs_info;
1416
1417 if (mctime_update_needed(inode, &now)) {
1418 int err, release;
1419 struct ubifs_budget_req req = { .dirtied_ino = 1,
1420 .dirtied_ino_d = ALIGN(ui->data_len, 8) };
1421
1422 err = ubifs_budget_space(c, &req);
1423 if (err)
1424 return err;
1425
1426 mutex_lock(&ui->ui_mutex);
1427 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1428 release = ui->dirty;
1429 mark_inode_dirty_sync(inode);
1430 mutex_unlock(&ui->ui_mutex);
1431 if (release)
1432 ubifs_release_budget(c, &req);
1433 }
1434
1435 return 0;
1436 }
1437
1438 static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
1439 {
1440 int err = update_mctime(file_inode(iocb->ki_filp));
1441 if (err)
1442 return err;
1443
1444 return generic_file_write_iter(iocb, from);
1445 }
1446
1447 static bool ubifs_dirty_folio(struct address_space *mapping,
1448 struct folio *folio)
1449 {
1450 bool ret;
1451 struct ubifs_info *c = mapping->host->i_sb->s_fs_info;
1452
1453 ret = filemap_dirty_folio(mapping, folio);
1454 /*
1455 * An attempt to dirty a page without budgeting for it - should not
1456 * happen.
1457 */
1458 ubifs_assert(c, ret == false);
1459 return ret;
1460 }
1461
1462 static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
1463 {
1464 struct inode *inode = folio->mapping->host;
1465 struct ubifs_info *c = inode->i_sb->s_fs_info;
1466
1467 if (folio_test_writeback(folio))
1468 return false;
1469
1470 /*
1471 * The page is private but not dirty - weird? There is one condition
1472 * that makes this happen: 'ubifs_writepage()' skipped the page because
1473 * its index is beyond i_size (for example, the file was truncated by
1474 * another process, A), and the page was then invalidated by an
1475 * fadvise64() syscall before being truncated by process A.
1476 */
1477 ubifs_assert(c, folio_test_private(folio));
1478 if (folio_test_checked(folio))
1479 release_new_page_budget(c);
1480 else
1481 release_existing_page_budget(c);
1482
1483 atomic_long_dec(&c->dirty_pg_cnt);
1484 folio_detach_private(folio);
1485 folio_clear_checked(folio);
1486 return true;
1487 }
1488
1489 /*
1490 * mmap()d file has taken write protection fault and is being made writable.
1491 * UBIFS must ensure page is budgeted for.
1492 */
1493 static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
1494 {
1495 struct folio *folio = page_folio(vmf->page);
1496 struct inode *inode = file_inode(vmf->vma->vm_file);
1497 struct ubifs_info *c = inode->i_sb->s_fs_info;
1498 struct timespec64 now = current_time(inode);
1499 struct ubifs_budget_req req = { .new_page = 1 };
1500 int err, update_time;
1501
1502 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
1503 i_size_read(inode));
1504 ubifs_assert(c, !c->ro_media && !c->ro_mount);
1505
1506 if (unlikely(c->ro_error))
1507 return VM_FAULT_SIGBUS; /* -EROFS */
1508
1509 /*
1510 * We have not locked @folio so far so we may budget for changing the
1511 * folio. Note, we cannot do this after we locked the folio, because
1512 * budgeting may cause write-back which would cause deadlock.
1513 *
1514 * At the moment we do not know whether the folio is dirty or not, so we
1515 * assume that it is not and budget for a new folio. We could look at
1516 * the @PG_private flag and figure this out, but we may race with write
1517 * back and the folio state may change by the time we lock it, so this
1518 * would need additional care. We do not bother with this at the
1519 * moment, although it might be a good idea to do so. Instead, we allocate
1520 * budget for a new folio and amend it later on if the folio was in fact
1521 * dirty.
1522 *
1523 * The budgeting-related logic of this function is similar to what we
1524 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
1525 * for more comments.
1526 */
1527 update_time = mctime_update_needed(inode, &now);
1528 if (update_time)
1529 /*
1530 * We have to change inode time stamp which requires extra
1531 * budgeting.
1532 */
1533 req.dirtied_ino = 1;
1534
1535 err = ubifs_budget_space(c, &req);
1536 if (unlikely(err)) {
1537 if (err == -ENOSPC)
1538 ubifs_warn(c, "out of space for mmapped file (inode number %lu)",
1539 inode->i_ino);
1540 return VM_FAULT_SIGBUS;
1541 }
1542
1543 folio_lock(folio);
1544 if (unlikely(folio->mapping != inode->i_mapping ||
1545 folio_pos(folio) >= i_size_read(inode))) {
1546 /* Folio got truncated out from underneath us */
1547 goto sigbus;
1548 }
1549
1550 if (folio->private)
1551 release_new_page_budget(c);
1552 else {
1553 if (!folio_test_checked(folio))
1554 ubifs_convert_page_budget(c);
1555 folio_attach_private(folio, (void *)1);
1556 atomic_long_inc(&c->dirty_pg_cnt);
1557 filemap_dirty_folio(folio->mapping, folio);
1558 }
1559
1560 if (update_time) {
1561 int release;
1562 struct ubifs_inode *ui = ubifs_inode(inode);
1563
1564 mutex_lock(&ui->ui_mutex);
1565 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
1566 release = ui->dirty;
1567 mark_inode_dirty_sync(inode);
1568 mutex_unlock(&ui->ui_mutex);
1569 if (release)
1570 ubifs_release_dirty_inode_budget(c, ui);
1571 }
1572
1573 folio_wait_stable(folio);
1574 return VM_FAULT_LOCKED;
1575
1576 sigbus:
1577 folio_unlock(folio);
1578 ubifs_release_budget(c, &req);
1579 return VM_FAULT_SIGBUS;
1580 }
1581
1582 static const struct vm_operations_struct ubifs_file_vm_ops = {
1583 .fault = filemap_fault,
1584 .map_pages = filemap_map_pages,
1585 .page_mkwrite = ubifs_vm_page_mkwrite,
1586 };
1587
1588 static int ubifs_file_mmap_prepare(struct vm_area_desc *desc)
1589 {
1590 int err;
1591
1592 err = generic_file_mmap_prepare(desc);
1593 if (err)
1594 return err;
1595 desc->vm_ops = &ubifs_file_vm_ops;
1596
1597 if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
1598 file_accessed(desc->file);
1599
1600 return 0;
1601 }
1602
1603 static const char *ubifs_get_link(struct dentry *dentry,
1604 struct inode *inode,
1605 struct delayed_call *done)
1606 {
1607 struct ubifs_inode *ui = ubifs_inode(inode);
1608
1609 if (!IS_ENCRYPTED(inode))
1610 return ui->data;
1611
1612 if (!dentry)
1613 return ERR_PTR(-ECHILD);
1614
1615 return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
1616 }
1617
1618 static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
1619 const struct path *path, struct kstat *stat,
1620 u32 request_mask, unsigned int query_flags)
1621 {
1622 ubifs_getattr(idmap, path, stat, request_mask, query_flags);
1623
1624 if (IS_ENCRYPTED(d_inode(path->dentry)))
1625 return fscrypt_symlink_getattr(path, stat);
1626 return 0;
1627 }
1628
1629 const struct address_space_operations ubifs_file_address_operations = {
1630 .read_folio = ubifs_read_folio,
1631 .writepages = ubifs_writepages,
1632 .write_begin = ubifs_write_begin,
1633 .write_end = ubifs_write_end,
1634 .invalidate_folio = ubifs_invalidate_folio,
1635 .dirty_folio = ubifs_dirty_folio,
1636 .migrate_folio = filemap_migrate_folio,
1637 .release_folio = ubifs_release_folio,
1638 };
1639
1640 const struct inode_operations ubifs_file_inode_operations = {
1641 .setattr = ubifs_setattr,
1642 .getattr = ubifs_getattr,
1643 .listxattr = ubifs_listxattr,
1644 .update_time = ubifs_update_time,
1645 .fileattr_get = ubifs_fileattr_get,
1646 .fileattr_set = ubifs_fileattr_set,
1647 };
1648
1649 const struct inode_operations ubifs_symlink_inode_operations = {
1650 .get_link = ubifs_get_link,
1651 .setattr = ubifs_setattr,
1652 .getattr = ubifs_symlink_getattr,
1653 .listxattr = ubifs_listxattr,
1654 .update_time = ubifs_update_time,
1655 };
1656
1657 const struct file_operations ubifs_file_operations = {
1658 .llseek = generic_file_llseek,
1659 .read_iter = generic_file_read_iter,
1660 .write_iter = ubifs_write_iter,
1661 .mmap_prepare = ubifs_file_mmap_prepare,
1662 .fsync = ubifs_fsync,
1663 .unlocked_ioctl = ubifs_ioctl,
1664 .splice_read = filemap_splice_read,
1665 .splice_write = iter_file_splice_write,
1666 .open = fscrypt_file_open,
1667 #ifdef CONFIG_COMPAT
1668 .compat_ioctl = ubifs_compat_ioctl,
1669 #endif
1670 };
1671