// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

static inline bool is_invalid_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	return nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid;
}

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(is_invalid_nid(sbi, nid))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component,
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching the
		 * compressed page.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
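
/*
 * Illustrative note (not from the original source): with 4KB pages, 1 GiB
 * of low memory and ram_thresh set to 10 via sysfs, the FREE_NIDS branch
 * above permits roughly (1 GiB * 10 / 100) >> 2 = ~25 MiB of cached
 * free_nid entries before callers are told to stop building more.
 */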

static void clear_node_folio_dirty(struct folio *folio)
{
	if (folio_test_dirty(folio)) {
		f2fs_clear_page_cache_dirty_tag(folio);
		folio_clear_dirty_for_io(folio);
		dec_page_count(F2FS_F_SB(folio), F2FS_DIRTY_NODES);
	}
	folio_clear_uptodate(folio);
}

static struct folio *get_current_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_folio_retry(sbi, current_nat_addr(sbi, nid));
}

static struct folio *get_next_nat_folio(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct folio *src_folio;
	struct folio *dst_folio;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_folio = get_current_nat_folio(sbi, nid);
	if (IS_ERR(src_folio))
		return src_folio;
	dst_folio = f2fs_grab_meta_folio(sbi, dst_off);
	f2fs_bug_on(sbi, folio_test_dirty(src_folio));

	src_addr = folio_address(src_folio);
	dst_addr = folio_address(dst_folio);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	folio_mark_dirty(dst_folio);
	f2fs_folio_put(src_folio, true);

	set_to_next_nat(nm_i, nid);

	return dst_folio;
}
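
/*
 * Note: NAT blocks live in two sets that alternate across checkpoints.
 * Copying the current block to the "next" address and flipping the bitmap
 * bit via set_to_next_nat() gives copy-on-write semantics, so the NAT
 * contents of the last checkpoint stay intact until a new one commits.
 */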

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail,
	bool init_dirty)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	if (init_dirty) {
		INIT_LIST_HEAD(&ne->list);
		nm_i->nat_cnt[TOTAL_NAT]++;
		return ne;
	}

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i,
						nid_t n, bool for_dirty)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/*
	 * For a recently accessed nat entry which will not be dirtied soon,
	 * move it to the tail of the LRU list.
	 */
	if (ne && !get_nat_flag(ne, IS_DIRTY) && !for_dirty) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
				nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry *ne, bool init_dirty)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in below condition:
	 * 1. update NEW_ADDR to valid block address;
	 * 2. update old block address to new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	if (!init_dirty)
		nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}
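
/*
 * Dirty nat entries are grouped per NAT block through nat_entry_set, so a
 * checkpoint can flush one block's worth of entries at a time; entries
 * still at NEW_ADDR (preallocated) are kept off the per-set lists until
 * they receive a real block address.
 */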

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct folio *folio)
{
	return is_node_folio(folio) && IS_DNODE(folio) && is_cold_node(folio);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct folio *folio)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	folio_get(folio);
	fn->folio = folio;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
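
/*
 * The seq_id returned above increases monotonically; fsync uses it to
 * wait for writeback of all node pages up to a given sequence point
 * (see f2fs_wait_on_node_pages_writeback()).
 */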

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct folio *folio)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->folio == folio) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			folio_put(folio);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid, false);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid, false);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino, false);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid, false);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
	bool init_dirty = false;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid, true);
	if (!e) {
		init_dirty = true;
		e = __init_nat_entry(nm_i, new, NULL, true, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e, init_dirty);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino, false);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}
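
/*
 * Example of the version bump in set_node_addr(): freeing a node
 * (new_blkaddr == NULL_ADDR) increments ni.version, letting roll-forward
 * recovery distinguish a stale on-disk copy of the old node from a
 * reallocated node that reuses the same nid.
 */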

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
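
/*
 * f2fs_try_to_free_nats() is invoked from the f2fs shrinker (see
 * fs/f2fs/shrink.c) and returns the number of nat entries actually
 * freed, which may be less than nr_shrink; it returns 0 immediately
 * when nat_tree_lock cannot be taken without blocking.
 */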

int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct folio *folio = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;
	bool need_cache = true;

	ni->flag = 0;
	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid, false);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		if (IS_ENABLED(CONFIG_F2FS_CHECK_FS)) {
			need_cache = false;
			goto sanity_check;
		}
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab
	 * journal_rwsem first. This rwsem is on the critical path of
	 * checkpoint, which also requires the above nat_tree_lock.
	 * Therefore, if we fail to grab it here, retry without
	 * bothering the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(sbi, journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto sanity_check;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	folio = f2fs_get_meta_folio(sbi, index);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	nat_blk = folio_address(folio);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_folio_put(folio, true);
sanity_check:
	if (__is_valid_data_blkaddr(ni->blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni->blk_addr,
					DATA_GENERIC_ENHANCE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err_ratelimited(sbi,
			"f2fs_get_node_info of %pS: inconsistent nat entry, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			__builtin_return_address(0),
			ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	if (unlikely(f2fs_quota_file(sbi, ni->nid) &&
				!__is_valid_data_blkaddr(ni->blk_addr))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err_ratelimited(sbi,
			"f2fs_get_node_info of %pS: inconsistent nat entry from qf_ino, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			__builtin_return_address(0),
			ni->ino, ni->nid, ni->blk_addr, ni->version, ni->flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
	}

	/* cache nat entry */
	if (need_cache)
		cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct folio *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, (int)NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
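
/*
 * Example for the computation above: when a lookup fails at cur_level 1
 * under a double-indirect tree (max_level == 3), skipped_unit grows to
 * direct_blks * NIDS_PER_BLOCK^2 and base covers the direct plus
 * single-indirect regions, so the returned offset is the first block of
 * the next double-indirect child.
 */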

/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
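
/*
 * Worked example: for block == direct_index + 2 * direct_blks (the first
 * block past both direct-pointer regions), the path is
 * offset[] = { NODE_IND1_BLOCK, 0, 0 } with level == 2: the first
 * single-indirect node, its first direct child, slot 0.
 */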

static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start);

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct folio *nfolio[4];
	struct folio *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;

	if (!dn->inode_folio) {
		nfolio[0] = f2fs_get_inode_folio(sbi, nids[0]);
		if (IS_ERR(nfolio[0]))
			return PTR_ERR(nfolio[0]);
	} else {
		nfolio[0] = dn->inode_folio;
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_folio_put(nfolio[0], true);
		goto release_out;
	}

	parent = nfolio[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_folio = nfolio[0];
	dn->inode_folio_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (nids[i] && nids[i] == dn->inode->i_ino) {
			err = -EFSCORRUPTED;
			f2fs_err_ratelimited(sbi,
				"inode mapping table is corrupted, run fsck to fix it, "
				"ino:%lu, nid:%u, level:%d, offset:%d",
				dn->inode->i_ino, nids[i], level, offset[level]);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto release_pages;
		}

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			nfolio[i] = f2fs_new_node_folio(dn, noffset[i]);
			if (IS_ERR(nfolio[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(nfolio[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			nfolio[i] = f2fs_get_node_folio_ra(parent, offset[i - 1]);
			if (IS_ERR(nfolio[i])) {
				err = PTR_ERR(nfolio[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_folio_locked = false;
			folio_unlock(parent);
		} else {
			f2fs_folio_put(parent, true);
		}

		if (!done) {
			nfolio[i] = f2fs_get_node_folio(sbi, nids[i],
							NODE_TYPE_NON_INODE);
			if (IS_ERR(nfolio[i])) {
				err = PTR_ERR(nfolio[i]);
				f2fs_folio_put(nfolio[0], false);
				goto release_out;
			}
		}
		if (i < level) {
			parent = nfolio[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_folio = nfolio[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_folio, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_folio,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_folio_put(parent, true);
	if (i > 1)
		f2fs_folio_put(nfolio[0], false);
release_out:
	dn->inode_folio = NULL;
	dn->node_folio = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
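
/*
 * Typical call pattern (sketch only; error handling elided):
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	f2fs_lock_op(sbi);		// required for ALLOC_NODE only
 *	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	...use dn.data_blkaddr and dn.ofs_in_node...
 *	f2fs_put_dnode(&dn);
 *	f2fs_unlock_op(sbi);
 */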

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	if (ni.blk_addr != NEW_ADDR &&
	    !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
		f2fs_err_ratelimited(sbi,
			"nat entry is corrupted, run fsck to fix it, ino:%u, "
			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_folio_dirty(dn->node_folio);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_folio->index;
	f2fs_folio_put(dn->node_folio, true);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_folio = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct folio *folio;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	folio = f2fs_get_node_folio(sbi, dn->nid, NODE_TYPE_NON_INODE);
	if (PTR_ERR(folio) == -ENOENT)
		return 1;
	else if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (IS_INODE(folio) || ino_of_node(folio) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(folio));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_folio_put(folio, true);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_folio = folio;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_folio_put(folio, true);
		return err;
	}

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct folio *folio;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	folio = f2fs_get_node_folio(F2FS_I_SB(dn->inode), dn->nid,
						NODE_TYPE_NON_INODE);
	if (IS_ERR(folio)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(folio));
		return PTR_ERR(folio);
	}

	f2fs_ra_node_pages(folio, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(folio);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(folio, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(folio, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_folio = folio;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_folio_put(folio, true);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_folio_put(folio, true);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
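
/*
 * truncate_nodes() recurses at most depth - 1 levels and returns the
 * number of node blocks freed; an absent subtree (nid == 0) reports
 * NIDS_PER_BLOCK + 1 so the caller's node-offset accounting stays
 * aligned, and negative returns are errors.
 */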

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct folio *folios[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = get_nid(dn->inode_folio, offset[0], true);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		folios[i] = f2fs_get_node_folio(F2FS_I_SB(dn->inode), nid[i],
						NODE_TYPE_NON_INODE);
		if (IS_ERR(folios[i])) {
			err = PTR_ERR(folios[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(folios[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(folios[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(folios[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(folios[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_folio = folios[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_folio_put(folios[idx], true);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_folio_put(folios[i], true);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct folio *folio;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level <= 0) {
		if (!level) {
			level = -EFSCORRUPTED;
			f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
				__func__, inode->i_ino,
				from, ADDRS_PER_INODE(inode));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	folio = f2fs_get_inode_folio(sbi, inode->i_ino);
	if (IS_ERR(folio)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
		return PTR_ERR(folio);
	}

	set_new_dnode(&dn, inode, folio, NULL, 0);
	folio_unlock(folio);

	ri = F2FS_INODE(folio);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = get_nid(folio, offset[0], true);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err == -ENOENT) {
			set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_err_ratelimited(sbi,
				"truncate node fail, ino:%lu, nid:%u, "
				"offset[0]:%d, offset[1]:%d, nofs:%d",
				inode->i_ino, dn.nid, offset[0],
				offset[1], nofs);
			err = 0;
		}
		if (err < 0)
			goto fail;
		if (offset[1] == 0 && get_nid(folio, offset[0], true)) {
			folio_lock(folio);
			BUG_ON(!is_node_folio(folio));
			set_nid(folio, offset[0], 0, true);
			folio_unlock(folio);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_folio_put(folio, false);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct folio *nfolio;
	int err;

	if (!nid)
		return 0;

	nfolio = f2fs_get_xnode_folio(sbi, nid);
	if (IS_ERR(nfolio))
		return PTR_ERR(nfolio);

	set_new_dnode(&dn, inode, NULL, nfolio, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_folio_put(nfolio, true);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (!IS_DEVICE_ALIASING(inode) &&
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	     S_ISLNK(inode->i_mode)))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct folio *f2fs_new_inode_folio(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_folio_put(folio, true); */
	return f2fs_new_node_folio(&dn, 0);
}

struct folio *f2fs_new_node_folio(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct folio *folio;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), dn->nid, false);
	if (IS_ERR(folio))
		return folio;

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn_ratelimited(sbi,
			"f2fs_new_node_folio: inconsistent nat entry, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			new_ni.ino, new_ni.nid, new_ni.blk_addr,
			new_ni.version, new_ni.flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_folio_wait_writeback(folio, NODE, true, true);
	fill_node_footer(folio, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(folio, S_ISDIR(dn->inode->i_mode));
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	if (folio_mark_dirty(folio))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return folio;
fail:
	clear_node_folio_dirty(folio);
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);
}
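
/*
 * Note: ofs is the node offset within the inode's node tree; ofs == 0
 * denotes the inode block itself (hence inc_valid_inode_count() above),
 * and f2fs_has_xattr_block(ofs) matches the dedicated xattr node offset.
 */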

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_folio_put(folio, false)
 * LOCKED_PAGE or error: f2fs_folio_put(folio, true)
 */
static int read_node_folio(struct folio *folio, blk_opf_t op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.folio = folio,
		.encrypted_page = NULL,
	};
	int err;

	if (folio_test_uptodate(folio)) {
		if (!f2fs_inode_chksum_verify(sbi, folio)) {
			folio_clear_uptodate(folio);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, folio->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		folio_clear_uptodate(folio);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct folio *afolio;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	afolio = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (afolio)
		return;

	afolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
	if (IS_ERR(afolio))
		return;

	err = read_node_folio(afolio, REQ_RAHEAD);
	f2fs_folio_put(afolio, err ? true : false);
}

int f2fs_sanity_check_node_footer(struct f2fs_sb_info *sbi,
				  struct folio *folio, pgoff_t nid,
				  enum node_type ntype, bool in_irq)
{
	bool is_inode, is_xnode;

	if (unlikely(nid != nid_of_node(folio)))
		goto out_err;

	is_inode = IS_INODE(folio);
	is_xnode = f2fs_has_xattr_block(ofs_of_node(folio));

	switch (ntype) {
	case NODE_TYPE_REGULAR:
		if (is_inode && is_xnode)
			goto out_err;
		break;
	case NODE_TYPE_INODE:
		if (!is_inode || is_xnode)
			goto out_err;
		break;
	case NODE_TYPE_XATTR:
		if (is_inode || !is_xnode)
			goto out_err;
		break;
	case NODE_TYPE_NON_INODE:
		if (is_inode)
			goto out_err;
		break;
	default:
		break;
	}
	if (time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))
		goto out_err;
	return 0;
out_err:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn_ratelimited(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
			"node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			ntype, nid, nid_of_node(folio), ino_of_node(folio),
			ofs_of_node(folio), cpver_of_node(folio),
			next_blkaddr_of_node(folio));

	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	return -EFSCORRUPTED;
}
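
/*
 * Summary of the footer checks above: NODE_TYPE_INODE must be an inode
 * block and not an xattr block, NODE_TYPE_XATTR the reverse,
 * NODE_TYPE_NON_INODE anything but an inode, and NODE_TYPE_REGULAR only
 * rejects the impossible inode-plus-xattr combination.
 */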

static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
		struct folio *parent, int start, enum node_type ntype)
{
	struct folio *folio;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
	if (IS_ERR(folio))
		return folio;

	err = read_node_folio(folio, 0);
	if (err < 0)
		goto out_put_err;
	if (err == LOCKED_PAGE)
		goto page_hit;

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	folio_lock(folio);

	if (unlikely(!is_node_folio(folio))) {
		f2fs_folio_put(folio, true);
		goto repeat;
	}

	if (unlikely(!folio_test_uptodate(folio))) {
		err = -EIO;
		goto out_put_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, folio)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	err = f2fs_sanity_check_node_footer(sbi, folio, nid, ntype, false);
	if (!err)
		return folio;
out_err:
	folio_clear_uptodate(folio);
out_put_err:
	/* ENOENT comes from read_node_folio which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, folio, NODE);
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);
}

struct folio *f2fs_get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
				  enum node_type node_type)
{
	return __get_node_folio(sbi, nid, NULL, 0, node_type);
}

struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
{
	return __get_node_folio(sbi, ino, NULL, 0, NODE_TYPE_INODE);
}

struct folio *f2fs_get_xnode_folio(struct f2fs_sb_info *sbi, pgoff_t xnid)
{
	return __get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR);
}

static struct folio *f2fs_get_node_folio_ra(struct folio *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_F_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_folio(sbi, nid, parent, start, NODE_TYPE_REGULAR);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct folio *folio;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	folio = f2fs_filemap_get_folio(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (IS_ERR(folio))
		goto iput_out;

	if (!folio_test_uptodate(folio))
		goto folio_out;

	if (!folio_test_dirty(folio))
		goto folio_out;

	if (!folio_clear_dirty_for_io(folio))
		goto folio_out;

	ret = f2fs_write_inline_data(inode, folio);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		folio_mark_dirty(folio);
folio_out:
	f2fs_folio_put(folio, true);
iput_out:
	iput(inode);
}

static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct folio *last_folio = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_folio_put(last_folio, false);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(folio) || !is_cold_node(folio))
				continue;
			if (ino_of_node(folio) != ino)
				continue;

			folio_lock(folio);

			if (unlikely(!is_node_folio(folio))) {
continue_unlock:
				folio_unlock(folio);
				continue;
			}
			if (ino_of_node(folio) != ino)
				goto continue_unlock;

			if (!folio_test_dirty(folio)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_folio)
				f2fs_folio_put(last_folio, false);

			folio_get(folio);
			last_folio = folio;
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_folio;
}

static bool __write_node_folio(struct folio *folio, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(folio),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.folio = folio,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	struct f2fs_lock_context lc;
	unsigned int seq;

	trace_f2fs_writepage(folio, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		folio_unlock(folio);
		return true;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(folio) && is_cold_node(folio))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(folio);

	if (f2fs_sanity_check_node_footer(sbi, folio, nid,
					NODE_TYPE_REGULAR, false)) {
		f2fs_handle_critical_error(sbi, STOP_CP_REASON_CORRUPTED_NID);
		goto redirty_out;
	}

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	f2fs_down_read_trace(&sbi->node_write, &lc);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read_trace(&sbi->node_write, &lc);
		folio_unlock(folio);
		return true;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read_trace(&sbi->node_write, &lc);
		goto redirty_out;
	}

	if (atomic) {
		if (!test_opt(sbi, NOBARRIER))
			fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
		if (IS_INODE(folio))
			set_dentry_mark(folio,
				f2fs_need_dentry_mark(sbi, ino_of_node(folio)));
	}

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, folio)) {
		seq = f2fs_add_fsync_node_entry(sbi, folio);
		if (seq_id)
			*seq_id = seq;
	}

	folio_start_writeback(folio);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(folio));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read_trace(&sbi->node_write, &lc);

	folio_unlock(folio);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return true;

redirty_out:
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return false;
}
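
/*
 * __write_node_folio() returns true when the folio was consumed (written
 * out or dropped) and false when it was redirtied for a later pass; in
 * every case the folio lock is released before returning.
 */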

int f2fs_move_node_folio(struct folio *node_folio, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
		};

		f2fs_folio_wait_writeback(node_folio, NODE, true, true);

		folio_mark_dirty(node_folio);

		if (!folio_clear_dirty_for_io(node_folio)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (!__write_node_folio(node_folio, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL))
			err = -EAGAIN;
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!folio_test_writeback(node_folio))
			folio_mark_dirty(node_folio);
	}
out_page:
	folio_unlock(node_folio);
release_page:
	f2fs_folio_put(node_folio, false);
	return err;
}
1880
f2fs_fsync_node_pages(struct f2fs_sb_info * sbi,struct inode * inode,struct writeback_control * wbc,bool atomic,unsigned int * seq_id)1881 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1882 struct writeback_control *wbc, bool atomic,
1883 unsigned int *seq_id)
1884 {
1885 pgoff_t index;
1886 struct folio_batch fbatch;
1887 int ret = 0;
1888 struct folio *last_folio = NULL;
1889 bool marked = false;
1890 nid_t ino = inode->i_ino;
1891 int nr_folios;
1892 int nwritten = 0;
1893
1894 if (atomic) {
1895 last_folio = last_fsync_dnode(sbi, ino);
1896 if (IS_ERR_OR_NULL(last_folio))
1897 return PTR_ERR_OR_ZERO(last_folio);
1898 }
1899 retry:
1900 folio_batch_init(&fbatch);
1901 index = 0;
1902
1903 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1904 (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1905 &fbatch))) {
1906 int i;
1907
1908 for (i = 0; i < nr_folios; i++) {
1909 struct folio *folio = fbatch.folios[i];
1910 bool submitted = false;
1911
1912 if (unlikely(f2fs_cp_error(sbi))) {
1913 f2fs_folio_put(last_folio, false);
1914 folio_batch_release(&fbatch);
1915 ret = -EIO;
1916 goto out;
1917 }
1918
1919 if (!IS_DNODE(folio) || !is_cold_node(folio))
1920 continue;
1921 if (ino_of_node(folio) != ino)
1922 continue;
1923
1924 folio_lock(folio);
1925
1926 if (unlikely(!is_node_folio(folio))) {
1927 continue_unlock:
1928 folio_unlock(folio);
1929 continue;
1930 }
1931 if (ino_of_node(folio) != ino)
1932 goto continue_unlock;
1933
1934 if (!folio_test_dirty(folio) && folio != last_folio) {
1935 /* someone wrote it for us */
1936 goto continue_unlock;
1937 }
1938
1939 f2fs_folio_wait_writeback(folio, NODE, true, true);
1940
1941 set_fsync_mark(folio, 0);
1942 set_dentry_mark(folio, 0);
1943
1944 if (!atomic || folio == last_folio) {
1945 set_fsync_mark(folio, 1);
1946 percpu_counter_inc(&sbi->rf_node_block_count);
1947 if (IS_INODE(folio)) {
1948 if (is_inode_flag_set(inode,
1949 FI_DIRTY_INODE))
1950 f2fs_update_inode(inode, folio);
1951 if (!atomic)
1952 set_dentry_mark(folio,
1953 f2fs_need_dentry_mark(sbi, ino));
1954 }
1955 /* may have been written out by another thread */
1956 if (!folio_test_dirty(folio))
1957 folio_mark_dirty(folio);
1958 }
1959
1960 if (!folio_clear_dirty_for_io(folio))
1961 goto continue_unlock;
1962
1963 if (!__write_node_folio(folio, atomic &&
1964 folio == last_folio,
1965 &submitted, wbc, true,
1966 FS_NODE_IO, seq_id)) {
1967 f2fs_folio_put(last_folio, false);
1968 folio_batch_release(&fbatch);
1969 ret = -EIO;
1970 goto out;
1971 }
1972 if (submitted)
1973 nwritten++;
1974
1975 if (folio == last_folio) {
1976 f2fs_folio_put(folio, false);
1977 folio_batch_release(&fbatch);
1978 marked = true;
1979 goto out;
1980 }
1981 }
1982 folio_batch_release(&fbatch);
1983 cond_resched();
1984 }
1985 if (atomic && !marked) {
1986 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1987 ino, last_folio->index);
1988 folio_lock(last_folio);
1989 f2fs_folio_wait_writeback(last_folio, NODE, true, true);
1990 folio_mark_dirty(last_folio);
1991 folio_unlock(last_folio);
1992 goto retry;
1993 }
1994 out:
1995 if (nwritten)
1996 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1997 return ret;
1998 }
1999
2000 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
2001 {
2002 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2003 bool clean;
2004
2005 if (inode->i_ino != ino)
2006 return 0;
2007
2008 if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
2009 return 0;
2010
2011 spin_lock(&sbi->inode_lock[DIRTY_META]);
2012 clean = list_empty(&F2FS_I(inode)->gdirty_list);
2013 spin_unlock(&sbi->inode_lock[DIRTY_META]);
2014
2015 if (clean)
2016 return 0;
2017
2018 inode = igrab(inode);
2019 if (!inode)
2020 return 0;
2021 return 1;
2022 }
2023
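/*
 * If the inode backing this node folio is still dirty in memory, sync it
 * into the folio before writeback.  Returns true when the inode was
 * flushed, in which case the folio has already been unlocked.
 */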
2024 static bool flush_dirty_inode(struct folio *folio)
2025 {
2026 struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
2027 struct inode *inode;
2028 nid_t ino = ino_of_node(folio);
2029
2030 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
2031 if (!inode)
2032 return false;
2033
2034 f2fs_update_inode(inode, folio);
2035 folio_unlock(folio);
2036
2037 iput(inode);
2038 return true;
2039 }
2040
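/*
 * Walk all dirty node folios and kick off inline data writeback for any
 * inode folio that is still flagged as carrying inline data.
 */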
2041 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
2042 {
2043 pgoff_t index = 0;
2044 struct folio_batch fbatch;
2045 int nr_folios;
2046
2047 folio_batch_init(&fbatch);
2048
2049 while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
2050 (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
2051 &fbatch))) {
2052 int i;
2053
2054 for (i = 0; i < nr_folios; i++) {
2055 struct folio *folio = fbatch.folios[i];
2056
2057 if (!IS_INODE(folio))
2058 continue;
2059
2060 folio_lock(folio);
2061
2062 if (unlikely(!is_node_folio(folio)))
2063 goto unlock;
2064 if (!folio_test_dirty(folio))
2065 goto unlock;
2066
2067 /* flush inline_data if we are in an async context. */
2068 if (folio_test_f2fs_inline(folio)) {
2069 folio_clear_f2fs_inline(folio);
2070 folio_unlock(folio);
2071 flush_inline_data(sbi, ino_of_node(folio));
2072 continue;
2073 }
2074 unlock:
2075 folio_unlock(folio);
2076 }
2077 folio_batch_release(&fbatch);
2078 cond_resched();
2079 }
2080 }
2081
2082 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
2083 struct writeback_control *wbc,
2084 bool do_balance, enum iostat_type io_type)
2085 {
2086 pgoff_t index;
2087 struct folio_batch fbatch;
2088 int step = 0;
2089 int nwritten = 0;
2090 int ret = 0;
2091 int nr_folios, done = 0;
2092
2093 folio_batch_init(&fbatch);
2094
2095 next_step:
2096 index = 0;
2097
2098 while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
2099 &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
2100 &fbatch))) {
2101 int i;
2102
2103 for (i = 0; i < nr_folios; i++) {
2104 struct folio *folio = fbatch.folios[i];
2105 bool submitted = false;
2106
2107 /* give priority to WB_SYNC threads */
2108 if (atomic_read(&sbi->wb_sync_req[NODE]) &&
2109 wbc->sync_mode == WB_SYNC_NONE) {
2110 done = 1;
2111 break;
2112 }
2113
2114 /*
2115 * flushing sequence with step:
2116 * 0. indirect nodes
2117 * 1. dentry dnodes
2118 * 2. file dnodes
2119 */
2120 if (step == 0 && IS_DNODE(folio))
2121 continue;
2122 if (step == 1 && (!IS_DNODE(folio) ||
2123 is_cold_node(folio)))
2124 continue;
2125 if (step == 2 && (!IS_DNODE(folio) ||
2126 !is_cold_node(folio)))
2127 continue;
2128 lock_node:
2129 if (wbc->sync_mode == WB_SYNC_ALL)
2130 folio_lock(folio);
2131 else if (!folio_trylock(folio))
2132 continue;
2133
2134 if (unlikely(!is_node_folio(folio))) {
2135 continue_unlock:
2136 folio_unlock(folio);
2137 continue;
2138 }
2139
2140 if (!folio_test_dirty(folio)) {
2141 /* someone wrote it for us */
2142 goto continue_unlock;
2143 }
2144
2145 /* flush inline_data/inode if we are in an async context. */
2146 if (!do_balance)
2147 goto write_node;
2148
2149 /* flush inline_data */
2150 if (folio_test_f2fs_inline(folio)) {
2151 folio_clear_f2fs_inline(folio);
2152 folio_unlock(folio);
2153 flush_inline_data(sbi, ino_of_node(folio));
2154 goto lock_node;
2155 }
2156
2157 /* flush dirty inode */
2158 if (IS_INODE(folio) && flush_dirty_inode(folio))
2159 goto lock_node;
2160 write_node:
2161 f2fs_folio_wait_writeback(folio, NODE, true, true);
2162
2163 if (!folio_clear_dirty_for_io(folio))
2164 goto continue_unlock;
2165
2166 set_fsync_mark(folio, 0);
2167 set_dentry_mark(folio, 0);
2168
2169 if (!__write_node_folio(folio, false, &submitted,
2170 wbc, do_balance, io_type, NULL)) {
2171 folio_batch_release(&fbatch);
2172 ret = -EIO;
2173 goto out;
2174 }
2175 if (submitted)
2176 nwritten++;
2177
2178 if (--wbc->nr_to_write == 0)
2179 break;
2180 }
2181 folio_batch_release(&fbatch);
2182 cond_resched();
2183
2184 if (wbc->nr_to_write == 0) {
2185 step = 2;
2186 break;
2187 }
2188 }
2189
2190 if (step < 2) {
2191 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2192 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2193 goto out;
2194 step++;
2195 goto next_step;
2196 }
2197 out:
2198 if (nwritten)
2199 f2fs_submit_merged_write(sbi, NODE);
2200
2201 if (unlikely(f2fs_cp_error(sbi)))
2202 return -EIO;
2203 return ret;
2204 }
2205
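/*
 * Wait for writeback of every folio on the fsync node list whose seq_id
 * does not exceed the given one; entries are appended in sequence order,
 * so the walk stops at the first newer entry.  Returns any error
 * recorded against the node mapping.
 */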
2206 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2207 unsigned int seq_id)
2208 {
2209 struct fsync_node_entry *fn;
2210 struct list_head *head = &sbi->fsync_node_list;
2211 unsigned long flags;
2212 unsigned int cur_seq_id = 0;
2213
2214 while (seq_id && cur_seq_id < seq_id) {
2215 struct folio *folio;
2216
2217 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2218 if (list_empty(head)) {
2219 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2220 break;
2221 }
2222 fn = list_first_entry(head, struct fsync_node_entry, list);
2223 if (fn->seq_id > seq_id) {
2224 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2225 break;
2226 }
2227 cur_seq_id = fn->seq_id;
2228 folio = fn->folio;
2229 folio_get(folio);
2230 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2231
2232 f2fs_folio_wait_writeback(folio, NODE, true, false);
2233
2234 folio_put(folio);
2235 }
2236
2237 return filemap_check_errors(NODE_MAPPING(sbi));
2238 }
2239
2240 static int f2fs_write_node_pages(struct address_space *mapping,
2241 struct writeback_control *wbc)
2242 {
2243 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2244 struct blk_plug plug;
2245 long diff;
2246
2247 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2248 goto skip_write;
2249
2250 /* balancing f2fs's metadata in background */
2251 f2fs_balance_fs_bg(sbi, true);
2252
2253 /* collect a number of dirty node pages and write them together */
2254 if (wbc->sync_mode != WB_SYNC_ALL &&
2255 get_pages(sbi, F2FS_DIRTY_NODES) <
2256 nr_pages_to_skip(sbi, NODE))
2257 goto skip_write;
2258
2259 if (wbc->sync_mode == WB_SYNC_ALL)
2260 atomic_inc(&sbi->wb_sync_req[NODE]);
2261 else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2262 /* to avoid potential deadlock */
2263 if (current->plug)
2264 blk_finish_plug(current->plug);
2265 goto skip_write;
2266 }
2267
2268 trace_f2fs_writepages(mapping->host, wbc, NODE);
2269
2270 diff = nr_pages_to_write(sbi, NODE, wbc);
2271 blk_start_plug(&plug);
2272 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2273 blk_finish_plug(&plug);
2274 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2275
2276 if (wbc->sync_mode == WB_SYNC_ALL)
2277 atomic_dec(&sbi->wb_sync_req[NODE]);
2278 return 0;
2279
2280 skip_write:
2281 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2282 trace_f2fs_writepages(mapping->host, wbc, NODE);
2283 return 0;
2284 }
2285
2286 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2287 struct folio *folio)
2288 {
2289 trace_f2fs_set_page_dirty(folio, NODE);
2290
2291 if (!folio_test_uptodate(folio))
2292 folio_mark_uptodate(folio);
2293 #ifdef CONFIG_F2FS_CHECK_FS
2294 if (IS_INODE(folio))
2295 f2fs_inode_chksum_set(F2FS_M_SB(mapping), folio);
2296 #endif
2297 if (filemap_dirty_folio(mapping, folio)) {
2298 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2299 folio_set_f2fs_reference(folio);
2300 return true;
2301 }
2302 return false;
2303 }
2304
2305 /*
2306 * Structure of the f2fs node operations
2307 */
2308 const struct address_space_operations f2fs_node_aops = {
2309 .writepages = f2fs_write_node_pages,
2310 .dirty_folio = f2fs_dirty_node_folio,
2311 .invalidate_folio = f2fs_invalidate_folio,
2312 .release_folio = f2fs_release_folio,
2313 .migrate_folio = filemap_migrate_folio,
2314 };
2315
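/*
 * Free nid cache: entries live in free_nid_root, a radix tree keyed by
 * nid.  Entries in the FREE_NID state are additionally linked on
 * free_nid_list, while PREALLOC_NID entries stay in the tree only.
 */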
2316 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2317 nid_t n)
2318 {
2319 return radix_tree_lookup(&nm_i->free_nid_root, n);
2320 }
2321
2322 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2323 struct free_nid *i)
2324 {
2325 struct f2fs_nm_info *nm_i = NM_I(sbi);
2326 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2327
2328 if (err)
2329 return err;
2330
2331 nm_i->nid_cnt[FREE_NID]++;
2332 list_add_tail(&i->list, &nm_i->free_nid_list);
2333 return 0;
2334 }
2335
2336 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2337 struct free_nid *i, enum nid_state state)
2338 {
2339 struct f2fs_nm_info *nm_i = NM_I(sbi);
2340
2341 f2fs_bug_on(sbi, state != i->state);
2342 nm_i->nid_cnt[state]--;
2343 if (state == FREE_NID)
2344 list_del(&i->list);
2345 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2346 }
2347
2348 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2349 enum nid_state org_state, enum nid_state dst_state)
2350 {
2351 struct f2fs_nm_info *nm_i = NM_I(sbi);
2352
2353 f2fs_bug_on(sbi, org_state != i->state);
2354 i->state = dst_state;
2355 nm_i->nid_cnt[org_state]--;
2356 nm_i->nid_cnt[dst_state]++;
2357
2358 switch (dst_state) {
2359 case PREALLOC_NID:
2360 list_del(&i->list);
2361 break;
2362 case FREE_NID:
2363 list_add_tail(&i->list, &nm_i->free_nid_list);
2364 break;
2365 default:
2366 BUG_ON(1);
2367 }
2368 }
2369
2370 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2371 bool set, bool build)
2372 {
2373 struct f2fs_nm_info *nm_i = NM_I(sbi);
2374 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2375 unsigned int nid_ofs = nid - START_NID(nid);
2376
2377 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2378 return;
2379
2380 if (set) {
2381 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2382 return;
2383 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2384 nm_i->free_nid_count[nat_ofs]++;
2385 } else {
2386 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2387 return;
2388 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2389 if (!build)
2390 nm_i->free_nid_count[nat_ofs]--;
2391 }
2392 }
2393
2394 /* return true if the nid is recognized as free */
2395 static bool add_free_nid(struct f2fs_sb_info *sbi,
2396 nid_t nid, bool build, bool update)
2397 {
2398 struct f2fs_nm_info *nm_i = NM_I(sbi);
2399 struct free_nid *i, *e;
2400 struct nat_entry *ne;
2401 int err;
2402 bool ret = false;
2403
2404 /* 0 nid should not be used */
2405 if (unlikely(nid == 0))
2406 return false;
2407
2408 if (unlikely(f2fs_check_nid_range(sbi, nid)))
2409 return false;
2410
2411 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2412 i->nid = nid;
2413 i->state = FREE_NID;
2414
2415 err = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2416 f2fs_bug_on(sbi, err);
2417
2418 err = -EINVAL;
2419
2420 spin_lock(&nm_i->nid_list_lock);
2421
2422 if (build) {
2423 /*
2424 * Thread A Thread B
2425 * - f2fs_create
2426 * - f2fs_new_inode
2427 * - f2fs_alloc_nid
2428 * - __insert_nid_to_list(PREALLOC_NID)
2429 * - f2fs_balance_fs_bg
2430 * - f2fs_build_free_nids
2431 * - __f2fs_build_free_nids
2432 * - scan_nat_page
2433 * - add_free_nid
2434 * - __lookup_nat_cache
2435 * - f2fs_add_link
2436 * - f2fs_init_inode_metadata
2437 * - f2fs_new_inode_folio
2438 * - f2fs_new_node_folio
2439 * - set_node_addr
2440 * - f2fs_alloc_nid_done
2441 * - __remove_nid_from_list(PREALLOC_NID)
2442 * - __insert_nid_to_list(FREE_NID)
2443 */
2444 ne = __lookup_nat_cache(nm_i, nid, false);
2445 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2446 nat_get_blkaddr(ne) != NULL_ADDR))
2447 goto err_out;
2448
2449 e = __lookup_free_nid_list(nm_i, nid);
2450 if (e) {
2451 if (e->state == FREE_NID)
2452 ret = true;
2453 goto err_out;
2454 }
2455 }
2456 ret = true;
2457 err = __insert_free_nid(sbi, i);
2458 err_out:
2459 if (update) {
2460 update_free_nid_bitmap(sbi, nid, ret, build);
2461 if (!build)
2462 nm_i->available_nids++;
2463 }
2464 spin_unlock(&nm_i->nid_list_lock);
2465 radix_tree_preload_end();
2466
2467 if (err)
2468 kmem_cache_free(free_nid_slab, i);
2469 return ret;
2470 }
2471
2472 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2473 {
2474 struct f2fs_nm_info *nm_i = NM_I(sbi);
2475 struct free_nid *i;
2476 bool need_free = false;
2477
2478 spin_lock(&nm_i->nid_list_lock);
2479 i = __lookup_free_nid_list(nm_i, nid);
2480 if (i && i->state == FREE_NID) {
2481 __remove_free_nid(sbi, i, FREE_NID);
2482 need_free = true;
2483 }
2484 spin_unlock(&nm_i->nid_list_lock);
2485
2486 if (need_free)
2487 kmem_cache_free(free_nid_slab, i);
2488 }
2489
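/*
 * Scan one NAT block and add every nid whose block address is NULL_ADDR
 * to the free nid cache.  A NEW_ADDR in an on-disk NAT block is invalid
 * and is reported as -EFSCORRUPTED.
 */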
2490 static int scan_nat_page(struct f2fs_sb_info *sbi,
2491 struct f2fs_nat_block *nat_blk, nid_t start_nid)
2492 {
2493 struct f2fs_nm_info *nm_i = NM_I(sbi);
2494 block_t blk_addr;
2495 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2496 int i;
2497
2498 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2499
2500 i = start_nid % NAT_ENTRY_PER_BLOCK;
2501
2502 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2503 if (unlikely(start_nid >= nm_i->max_nid))
2504 break;
2505
2506 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2507
2508 if (blk_addr == NEW_ADDR)
2509 return -EFSCORRUPTED;
2510
2511 if (blk_addr == NULL_ADDR) {
2512 add_free_nid(sbi, start_nid, true, true);
2513 } else {
2514 spin_lock(&NM_I(sbi)->nid_list_lock);
2515 update_free_nid_bitmap(sbi, start_nid, false, true);
2516 spin_unlock(&NM_I(sbi)->nid_list_lock);
2517 }
2518 }
2519
2520 return 0;
2521 }
2522
2523 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2524 {
2525 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2526 struct f2fs_journal *journal = curseg->journal;
2527 int i;
2528
2529 down_read(&curseg->journal_rwsem);
2530 for (i = 0; i < nats_in_cursum(journal); i++) {
2531 block_t addr;
2532 nid_t nid;
2533
2534 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2535 nid = le32_to_cpu(nid_in_journal(journal, i));
2536 if (addr == NULL_ADDR)
2537 add_free_nid(sbi, nid, true, false);
2538 else
2539 remove_free_nid(sbi, nid);
2540 }
2541 up_read(&curseg->journal_rwsem);
2542 }
2543
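/*
 * Harvest free nids from the cached free_nid_bitmap, avoiding NAT block
 * reads for blocks that have already been scanned, and then pick up the
 * latest NAT updates from the current summary journal.
 */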
2544 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2545 {
2546 struct f2fs_nm_info *nm_i = NM_I(sbi);
2547 unsigned int i, idx;
2548 nid_t nid;
2549
2550 f2fs_down_read(&nm_i->nat_tree_lock);
2551
2552 for (i = 0; i < nm_i->nat_blocks; i++) {
2553 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2554 continue;
2555 if (!nm_i->free_nid_count[i])
2556 continue;
2557 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2558 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2559 NAT_ENTRY_PER_BLOCK, idx);
2560 if (idx >= NAT_ENTRY_PER_BLOCK)
2561 break;
2562
2563 nid = i * NAT_ENTRY_PER_BLOCK + idx;
2564 add_free_nid(sbi, nid, true, false);
2565
2566 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2567 goto out;
2568 }
2569 }
2570 out:
2571 scan_curseg_cache(sbi);
2572
2573 f2fs_up_read(&nm_i->nat_tree_lock);
2574 }
2575
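/*
 * Refill the free nid cache by scanning up to FREE_NID_PAGES NAT blocks
 * starting from next_scan_nid, preferring the in-memory bitmaps when not
 * mounting.  next_scan_nid is advanced (wrapping at max_nid) so that
 * successive calls walk the NAT area round-robin.
 */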
2576 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2577 bool sync, bool mount)
2578 {
2579 struct f2fs_nm_info *nm_i = NM_I(sbi);
2580 int i = 0, ret;
2581 nid_t nid = nm_i->next_scan_nid;
2582
2583 if (unlikely(nid >= nm_i->max_nid))
2584 nid = 0;
2585
2586 if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2587 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2588
2589 /* Enough entries */
2590 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2591 return 0;
2592
2593 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2594 return 0;
2595
2596 if (!mount) {
2597 /* try to find free nids in free_nid_bitmap */
2598 scan_free_nid_bits(sbi);
2599
2600 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2601 return 0;
2602 }
2603
2604 /* readahead nat pages to be scanned */
2605 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2606 META_NAT, true);
2607
2608 f2fs_down_read(&nm_i->nat_tree_lock);
2609
2610 while (1) {
2611 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2612 nm_i->nat_block_bitmap)) {
2613 struct folio *folio = get_current_nat_folio(sbi, nid);
2614
2615 if (IS_ERR(folio)) {
2616 ret = PTR_ERR(folio);
2617 } else {
2618 ret = scan_nat_page(sbi, folio_address(folio),
2619 nid);
2620 f2fs_folio_put(folio, true);
2621 }
2622
2623 if (ret) {
2624 f2fs_up_read(&nm_i->nat_tree_lock);
2625
2626 if (ret == -EFSCORRUPTED) {
2627 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2628 set_sbi_flag(sbi, SBI_NEED_FSCK);
2629 f2fs_handle_error(sbi,
2630 ERROR_INCONSISTENT_NAT);
2631 }
2632
2633 return ret;
2634 }
2635 }
2636
2637 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2638 if (unlikely(nid >= nm_i->max_nid))
2639 nid = 0;
2640
2641 if (++i >= FREE_NID_PAGES)
2642 break;
2643 }
2644
2645 /* record where to resume scanning for more free nids */
2646 nm_i->next_scan_nid = nid;
2647
2648 /* find free nids from current sum_pages */
2649 scan_curseg_cache(sbi);
2650
2651 f2fs_up_read(&nm_i->nat_tree_lock);
2652
2653 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2654 nm_i->ra_nid_pages, META_NAT, false);
2655
2656 return 0;
2657 }
2658
2659 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2660 {
2661 int ret;
2662
2663 mutex_lock(&NM_I(sbi)->build_lock);
2664 ret = __f2fs_build_free_nids(sbi, sync, mount);
2665 mutex_unlock(&NM_I(sbi)->build_lock);
2666
2667 return ret;
2668 }
2669
2670 /*
2671 * If this function returns true, the caller can obtain a new nid from
2672 * the second parameter. The returned nid can be used as an ino as well
2673 * as a nid when an inode is created.
2674 */
2675 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2676 {
2677 struct f2fs_nm_info *nm_i = NM_I(sbi);
2678 struct free_nid *i = NULL;
2679 retry:
2680 if (time_to_inject(sbi, FAULT_ALLOC_NID))
2681 return false;
2682
2683 spin_lock(&nm_i->nid_list_lock);
2684
2685 if (unlikely(nm_i->available_nids == 0)) {
2686 spin_unlock(&nm_i->nid_list_lock);
2687 return false;
2688 }
2689
2690 /* We should not use stale free nids created by f2fs_build_free_nids */
2691 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2692 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2693 i = list_first_entry(&nm_i->free_nid_list,
2694 struct free_nid, list);
2695
2696 if (unlikely(is_invalid_nid(sbi, i->nid))) {
2697 spin_unlock(&nm_i->nid_list_lock);
2698 f2fs_err(sbi, "Corrupted nid %u in free_nid_list",
2699 i->nid);
2700 f2fs_stop_checkpoint(sbi, false,
2701 STOP_CP_REASON_CORRUPTED_NID);
2702 return false;
2703 }
2704
2705 *nid = i->nid;
2706
2707 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2708 nm_i->available_nids--;
2709
2710 update_free_nid_bitmap(sbi, *nid, false, false);
2711
2712 spin_unlock(&nm_i->nid_list_lock);
2713 return true;
2714 }
2715 spin_unlock(&nm_i->nid_list_lock);
2716
2717 /* Let's scan NAT pages and their caches to get free nids */
2718 if (!f2fs_build_free_nids(sbi, true, false))
2719 goto retry;
2720 return false;
2721 }
2722
2723 /*
2724 * f2fs_alloc_nid() should be called prior to this function.
2725 */
2726 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2727 {
2728 struct f2fs_nm_info *nm_i = NM_I(sbi);
2729 struct free_nid *i;
2730
2731 spin_lock(&nm_i->nid_list_lock);
2732 i = __lookup_free_nid_list(nm_i, nid);
2733 f2fs_bug_on(sbi, !i);
2734 __remove_free_nid(sbi, i, PREALLOC_NID);
2735 spin_unlock(&nm_i->nid_list_lock);
2736
2737 kmem_cache_free(free_nid_slab, i);
2738 }
2739
2740 /*
2741 * f2fs_alloc_nid() should be called prior to this function.
2742 */
2743 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2744 {
2745 struct f2fs_nm_info *nm_i = NM_I(sbi);
2746 struct free_nid *i;
2747 bool need_free = false;
2748
2749 if (!nid)
2750 return;
2751
2752 spin_lock(&nm_i->nid_list_lock);
2753 i = __lookup_free_nid_list(nm_i, nid);
2754 f2fs_bug_on(sbi, !i);
2755
2756 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2757 __remove_free_nid(sbi, i, PREALLOC_NID);
2758 need_free = true;
2759 } else {
2760 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2761 }
2762
2763 nm_i->available_nids++;
2764
2765 update_free_nid_bitmap(sbi, nid, true, false);
2766
2767 spin_unlock(&nm_i->nid_list_lock);
2768
2769 if (need_free)
2770 kmem_cache_free(free_nid_slab, i);
2771 }
2772
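/*
 * Shrinker path: drop cached FREE_NID entries above MAX_FREE_NIDS, in
 * batches of SHRINK_NID_BATCH_SIZE per list pass, and return how many
 * entries were freed.  Bails out early if the build lock is contended.
 */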
2773 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2774 {
2775 struct f2fs_nm_info *nm_i = NM_I(sbi);
2776 int nr = nr_shrink;
2777
2778 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2779 return 0;
2780
2781 if (!mutex_trylock(&nm_i->build_lock))
2782 return 0;
2783
2784 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2785 struct free_nid *i, *next;
2786 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2787
2788 spin_lock(&nm_i->nid_list_lock);
2789 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2790 if (!nr_shrink || !batch ||
2791 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2792 break;
2793 __remove_free_nid(sbi, i, FREE_NID);
2794 kmem_cache_free(free_nid_slab, i);
2795 nr_shrink--;
2796 batch--;
2797 }
2798 spin_unlock(&nm_i->nid_list_lock);
2799 }
2800
2801 mutex_unlock(&nm_i->build_lock);
2802
2803 return nr - nr_shrink;
2804 }
2805
2806 int f2fs_recover_inline_xattr(struct inode *inode, struct folio *folio)
2807 {
2808 void *src_addr, *dst_addr;
2809 size_t inline_size;
2810 struct folio *ifolio;
2811 struct f2fs_inode *ri;
2812
2813 ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino);
2814 if (IS_ERR(ifolio))
2815 return PTR_ERR(ifolio);
2816
2817 ri = F2FS_INODE(folio);
2818 if (ri->i_inline & F2FS_INLINE_XATTR) {
2819 if (!f2fs_has_inline_xattr(inode)) {
2820 set_inode_flag(inode, FI_INLINE_XATTR);
2821 stat_inc_inline_xattr(inode);
2822 }
2823 } else {
2824 if (f2fs_has_inline_xattr(inode)) {
2825 stat_dec_inline_xattr(inode);
2826 clear_inode_flag(inode, FI_INLINE_XATTR);
2827 }
2828 goto update_inode;
2829 }
2830
2831 dst_addr = inline_xattr_addr(inode, ifolio);
2832 src_addr = inline_xattr_addr(inode, folio);
2833 inline_size = inline_xattr_size(inode);
2834
2835 f2fs_folio_wait_writeback(ifolio, NODE, true, true);
2836 memcpy(dst_addr, src_addr, inline_size);
2837 update_inode:
2838 f2fs_update_inode(inode, ifolio);
2839 f2fs_folio_put(ifolio, true);
2840 return 0;
2841 }
2842
2843 int f2fs_recover_xattr_data(struct inode *inode, struct folio *folio)
2844 {
2845 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2846 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2847 nid_t new_xnid;
2848 struct dnode_of_data dn;
2849 struct node_info ni;
2850 struct folio *xfolio;
2851 int err;
2852
2853 if (!prev_xnid)
2854 goto recover_xnid;
2855
2856 /* 1: invalidate the previous xattr nid */
2857 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2858 if (err)
2859 return err;
2860
2861 f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
2862 dec_valid_node_count(sbi, inode, false);
2863 set_node_addr(sbi, &ni, NULL_ADDR, false);
2864
2865 recover_xnid:
2866 /* 2: update xattr nid in inode */
2867 if (!f2fs_alloc_nid(sbi, &new_xnid))
2868 return -ENOSPC;
2869
2870 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2871 xfolio = f2fs_new_node_folio(&dn, XATTR_NODE_OFFSET);
2872 if (IS_ERR(xfolio)) {
2873 f2fs_alloc_nid_failed(sbi, new_xnid);
2874 return PTR_ERR(xfolio);
2875 }
2876
2877 f2fs_alloc_nid_done(sbi, new_xnid);
2878 f2fs_update_inode_page(inode);
2879
2880 /* 3: update and set xattr node page dirty */
2881 if (folio) {
2882 memcpy(F2FS_NODE(xfolio), F2FS_NODE(folio),
2883 VALID_XATTR_BLOCK_SIZE);
2884 folio_mark_dirty(xfolio);
2885 }
2886 f2fs_folio_put(xfolio, true);
2887
2888 return 0;
2889 }
2890
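/*
 * Recreate the in-core inode block for an fsynced inode during recovery:
 * grab a fresh inode folio, copy the static part of the source inode,
 * reset size/blocks/links, and install a NEW_ADDR NAT entry for it.
 */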
2891 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct folio *folio)
2892 {
2893 struct f2fs_inode *src, *dst;
2894 nid_t ino = ino_of_node(folio);
2895 struct node_info old_ni, new_ni;
2896 struct folio *ifolio;
2897 int err;
2898
2899 err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2900 if (err)
2901 return err;
2902
2903 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2904 return -EINVAL;
2905 retry:
2906 ifolio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), ino, false);
2907 if (IS_ERR(ifolio)) {
2908 memalloc_retry_wait(GFP_NOFS);
2909 goto retry;
2910 }
2911
2912 /* Should not use this inode from free nid list */
2913 remove_free_nid(sbi, ino);
2914
2915 if (!folio_test_uptodate(ifolio))
2916 folio_mark_uptodate(ifolio);
2917 fill_node_footer(ifolio, ino, ino, 0, true);
2918 set_cold_node(ifolio, false);
2919
2920 src = F2FS_INODE(folio);
2921 dst = F2FS_INODE(ifolio);
2922
2923 memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2924 dst->i_size = 0;
2925 dst->i_blocks = cpu_to_le64(1);
2926 dst->i_links = cpu_to_le32(1);
2927 dst->i_xattr_nid = 0;
2928 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2929 if (dst->i_inline & F2FS_EXTRA_ATTR) {
2930 dst->i_extra_isize = src->i_extra_isize;
2931
2932 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2933 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2934 i_inline_xattr_size))
2935 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2936
2937 if (f2fs_sb_has_project_quota(sbi) &&
2938 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2939 i_projid))
2940 dst->i_projid = src->i_projid;
2941
2942 if (f2fs_sb_has_inode_crtime(sbi) &&
2943 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2944 i_crtime_nsec)) {
2945 dst->i_crtime = src->i_crtime;
2946 dst->i_crtime_nsec = src->i_crtime_nsec;
2947 }
2948 }
2949
2950 new_ni = old_ni;
2951 new_ni.ino = ino;
2952
2953 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2954 WARN_ON(1);
2955 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2956 inc_valid_inode_count(sbi);
2957 folio_mark_dirty(ifolio);
2958 f2fs_folio_put(ifolio, true);
2959 return 0;
2960 }
2961
2962 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2963 unsigned int segno, struct f2fs_summary_block *sum)
2964 {
2965 struct f2fs_node *rn;
2966 struct f2fs_summary *sum_entry;
2967 block_t addr;
2968 int i, idx, last_offset, nrpages;
2969
2970 /* scan the node segment */
2971 last_offset = BLKS_PER_SEG(sbi);
2972 addr = START_BLOCK(sbi, segno);
2973 sum_entry = sum_entries(sum);
2974
2975 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2976 nrpages = bio_max_segs(last_offset - i);
2977
2978 /* readahead node pages */
2979 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2980
2981 for (idx = addr; idx < addr + nrpages; idx++) {
2982 struct folio *folio = f2fs_get_tmp_folio(sbi, idx);
2983
2984 if (IS_ERR(folio))
2985 return PTR_ERR(folio);
2986
2987 rn = F2FS_NODE(folio);
2988 sum_entry->nid = rn->footer.nid;
2989 sum_entry->version = 0;
2990 sum_entry->ofs_in_node = 0;
2991 sum_entry++;
2992 f2fs_folio_put(folio, true);
2993 }
2994
2995 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2996 addr + nrpages);
2997 }
2998 return 0;
2999 }
3000
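/*
 * Move every NAT entry out of the hot data summary journal into the
 * in-memory dirty NAT cache, so that the next flush writes it through a
 * NAT block instead of the journal.
 */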
3001 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
3002 {
3003 struct f2fs_nm_info *nm_i = NM_I(sbi);
3004 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3005 struct f2fs_journal *journal = curseg->journal;
3006 int i;
3007 bool init_dirty;
3008
3009 down_write(&curseg->journal_rwsem);
3010 for (i = 0; i < nats_in_cursum(journal); i++) {
3011 struct nat_entry *ne;
3012 struct f2fs_nat_entry raw_ne;
3013 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
3014
3015 if (f2fs_check_nid_range(sbi, nid))
3016 continue;
3017
3018 init_dirty = false;
3019
3020 raw_ne = nat_in_journal(journal, i);
3021
3022 ne = __lookup_nat_cache(nm_i, nid, true);
3023 if (!ne) {
3024 init_dirty = true;
3025 ne = __alloc_nat_entry(sbi, nid, true);
3026 __init_nat_entry(nm_i, ne, &raw_ne, true, true);
3027 }
3028
3029 /*
3030 * if a free nat in the journal has not been used since the last
3031 * checkpoint, we should remove it from the available nids,
3032 * since we will add it again later.
3033 */
3034 if (!get_nat_flag(ne, IS_DIRTY) &&
3035 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
3036 spin_lock(&nm_i->nid_list_lock);
3037 nm_i->available_nids--;
3038 spin_unlock(&nm_i->nid_list_lock);
3039 }
3040
3041 __set_nat_cache_dirty(nm_i, ne, init_dirty);
3042 }
3043 update_nats_in_cursum(journal, -i);
3044 up_write(&curseg->journal_rwsem);
3045 }
3046
3047 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
3048 struct list_head *head, int max)
3049 {
3050 struct nat_entry_set *cur;
3051
3052 if (nes->entry_cnt >= max)
3053 goto add_out;
3054
3055 list_for_each_entry(cur, head, set_list) {
3056 if (cur->entry_cnt >= nes->entry_cnt) {
3057 list_add(&nes->set_list, cur->set_list.prev);
3058 return;
3059 }
3060 }
3061 add_out:
3062 list_add_tail(&nes->set_list, head);
3063 }
3064
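/*
 * Maintain the per-NAT-block full/empty bitmaps backing nat_bits (a
 * no-op when the feature is disabled).  For block 0, entry 0 (the
 * reserved nid) is skipped and counted as valid, so that block never
 * reads as empty.
 */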
3065 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
3066 const struct f2fs_nat_block *nat_blk)
3067 {
3068 struct f2fs_nm_info *nm_i = NM_I(sbi);
3069 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
3070 int valid = 0;
3071 int i = 0;
3072
3073 if (!enabled_nat_bits(sbi, NULL))
3074 return;
3075
3076 if (nat_index == 0) {
3077 valid = 1;
3078 i = 1;
3079 }
3080 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
3081 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
3082 valid++;
3083 }
3084 if (valid == 0) {
3085 __set_bit_le(nat_index, nm_i->empty_nat_bits);
3086 __clear_bit_le(nat_index, nm_i->full_nat_bits);
3087 return;
3088 }
3089
3090 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
3091 if (valid == NAT_ENTRY_PER_BLOCK)
3092 __set_bit_le(nat_index, nm_i->full_nat_bits);
3093 else
3094 __clear_bit_le(nat_index, nm_i->full_nat_bits);
3095 }
3096
3097 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
3098 struct nat_entry_set *set, struct cp_control *cpc)
3099 {
3100 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3101 struct f2fs_journal *journal = curseg->journal;
3102 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
3103 bool to_journal = true;
3104 struct f2fs_nat_block *nat_blk;
3105 struct nat_entry *ne, *cur;
3106 struct folio *folio = NULL;
3107
3108 /*
3109 * there are two ways to flush nat entries:
3110 * #1, flush them to the journal in the current hot data summary block.
3111 * #2, flush them to a nat page.
3112 */
3113 if (enabled_nat_bits(sbi, cpc) ||
3114 !__has_cursum_space(sbi, journal, set->entry_cnt, NAT_JOURNAL))
3115 to_journal = false;
3116
3117 if (to_journal) {
3118 down_write(&curseg->journal_rwsem);
3119 } else {
3120 folio = get_next_nat_folio(sbi, start_nid);
3121 if (IS_ERR(folio))
3122 return PTR_ERR(folio);
3123
3124 nat_blk = folio_address(folio);
3125 f2fs_bug_on(sbi, !nat_blk);
3126 }
3127
3128 /* flush dirty nats in nat entry set */
3129 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3130 struct f2fs_nat_entry *raw_ne;
3131 nid_t nid = nat_get_nid(ne);
3132 int offset;
3133
3134 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3135
3136 if (to_journal) {
3137 offset = f2fs_lookup_journal_in_cursum(sbi, journal,
3138 NAT_JOURNAL, nid, 1);
3139 f2fs_bug_on(sbi, offset < 0);
3140 raw_ne = &nat_in_journal(journal, offset);
3141 nid_in_journal(journal, offset) = cpu_to_le32(nid);
3142 } else {
3143 raw_ne = &nat_blk->entries[nid - start_nid];
3144 }
3145 raw_nat_from_node_info(raw_ne, &ne->ni);
3146 nat_reset_flag(ne);
3147 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
3148 if (nat_get_blkaddr(ne) == NULL_ADDR) {
3149 add_free_nid(sbi, nid, false, true);
3150 } else {
3151 spin_lock(&NM_I(sbi)->nid_list_lock);
3152 update_free_nid_bitmap(sbi, nid, false, false);
3153 spin_unlock(&NM_I(sbi)->nid_list_lock);
3154 }
3155 }
3156
3157 if (to_journal) {
3158 up_write(&curseg->journal_rwsem);
3159 } else {
3160 __update_nat_bits(sbi, start_nid, nat_blk);
3161 f2fs_folio_put(folio, true);
3162 }
3163
3164 /* Allow dirty nats by node block allocation in write_begin */
3165 if (!set->entry_cnt) {
3166 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3167 kmem_cache_free(nat_entry_set_slab, set);
3168 }
3169 return 0;
3170 }
3171
3172 /*
3173 * This function is called during the checkpointing process.
3174 */
3175 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3176 {
3177 struct f2fs_nm_info *nm_i = NM_I(sbi);
3178 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3179 struct f2fs_journal *journal = curseg->journal;
3180 struct nat_entry_set *setvec[NAT_VEC_SIZE];
3181 struct nat_entry_set *set, *tmp;
3182 unsigned int found, entry_count = 0;
3183 nid_t set_idx = 0;
3184 LIST_HEAD(sets);
3185 int err = 0;
3186
3187 /*
3188 * during unmount, let's flush nat_bits before checking
3189 * nat_cnt[DIRTY_NAT].
3190 */
3191 if (enabled_nat_bits(sbi, cpc)) {
3192 f2fs_down_write(&nm_i->nat_tree_lock);
3193 remove_nats_in_journal(sbi);
3194 f2fs_up_write(&nm_i->nat_tree_lock);
3195 }
3196
3197 if (!nm_i->nat_cnt[DIRTY_NAT])
3198 return 0;
3199
3200 f2fs_down_write(&nm_i->nat_tree_lock);
3201
3202 /*
3203 * if there is not enough space in the journal to store the dirty
3204 * nat entries, remove all entries from the journal and merge them
3205 * into the nat entry set.
3206 */
3207 if (enabled_nat_bits(sbi, cpc) ||
3208 !__has_cursum_space(sbi, journal,
3209 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3210 remove_nats_in_journal(sbi);
3211
3212 while ((found = __gang_lookup_nat_set(nm_i,
3213 set_idx, NAT_VEC_SIZE, setvec))) {
3214 unsigned idx;
3215
3216 set_idx = setvec[found - 1]->set + 1;
3217 for (idx = 0; idx < found; idx++)
3218 __adjust_nat_entry_set(setvec[idx], &sets,
3219 MAX_NAT_JENTRIES(sbi, journal));
3220 }
3221
3222 /*
3223 * Readahead the current NAT block to prevent read requests from
3224 * being issued and waited on one by one.
3225 */
3226 list_for_each_entry(set, &sets, set_list) {
3227 entry_count += set->entry_cnt;
3228 if (!enabled_nat_bits(sbi, cpc) &&
3229 __has_cursum_space(sbi, journal,
3230 entry_count, NAT_JOURNAL))
3231 continue;
3232 f2fs_ra_meta_pages(sbi, set->set, 1, META_NAT, true);
3233 }
3234 /* flush dirty nats in nat entry set */
3235 list_for_each_entry_safe(set, tmp, &sets, set_list) {
3236 err = __flush_nat_entry_set(sbi, set, cpc);
3237 if (err)
3238 break;
3239 }
3240
3241 f2fs_up_write(&nm_i->nat_tree_lock);
3242 /* Allow dirty nats by node block allocation in write_begin */
3243
3244 return err;
3245 }
3246
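/*
 * Load the nat_bits blocks stored at the tail of the checkpoint area and
 * validate them against the checkpoint version and CRC; on a mismatch
 * the nat_bits feature is simply disabled rather than treated as an
 * error.
 */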
3247 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3248 {
3249 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3250 struct f2fs_nm_info *nm_i = NM_I(sbi);
3251 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3252 unsigned int i;
3253 __u64 cp_ver = cur_cp_version(ckpt);
3254 block_t nat_bits_addr;
3255
3256 if (!enabled_nat_bits(sbi, NULL))
3257 return 0;
3258
3259 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3260 nm_i->nat_bits = f2fs_kvzalloc(sbi,
3261 F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
3262 if (!nm_i->nat_bits)
3263 return -ENOMEM;
3264
3265 nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
3266 nm_i->nat_bits_blocks;
3267 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3268 struct folio *folio;
3269
3270 folio = f2fs_get_meta_folio(sbi, nat_bits_addr++);
3271 if (IS_ERR(folio))
3272 return PTR_ERR(folio);
3273
3274 memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
3275 folio_address(folio), F2FS_BLKSIZE);
3276 f2fs_folio_put(folio, true);
3277 }
3278
3279 cp_ver |= (cur_cp_crc(ckpt) << 32);
3280 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3281 disable_nat_bits(sbi, true);
3282 return 0;
3283 }
3284
3285 nm_i->full_nat_bits = nm_i->nat_bits + 8;
3286 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3287
3288 f2fs_notice(sbi, "Found nat_bits in checkpoint");
3289 return 0;
3290 }
3291
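/*
 * Prime the free nid bitmaps from nat_bits: NAT blocks flagged as empty
 * get all of their nids marked free, and both empty and full blocks are
 * marked as already scanned in nat_block_bitmap.
 */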
3292 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3293 {
3294 struct f2fs_nm_info *nm_i = NM_I(sbi);
3295 unsigned int i = 0;
3296 nid_t nid, last_nid;
3297
3298 if (!enabled_nat_bits(sbi, NULL))
3299 return;
3300
3301 for (i = 0; i < nm_i->nat_blocks; i++) {
3302 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3303 if (i >= nm_i->nat_blocks)
3304 break;
3305
3306 __set_bit_le(i, nm_i->nat_block_bitmap);
3307
3308 nid = i * NAT_ENTRY_PER_BLOCK;
3309 last_nid = nid + NAT_ENTRY_PER_BLOCK;
3310
3311 spin_lock(&NM_I(sbi)->nid_list_lock);
3312 for (; nid < last_nid; nid++)
3313 update_free_nid_bitmap(sbi, nid, true, true);
3314 spin_unlock(&NM_I(sbi)->nid_list_lock);
3315 }
3316
3317 for (i = 0; i < nm_i->nat_blocks; i++) {
3318 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3319 if (i >= nm_i->nat_blocks)
3320 break;
3321
3322 __set_bit_le(i, nm_i->nat_block_bitmap);
3323 }
3324 }
3325
3326 static int init_node_manager(struct f2fs_sb_info *sbi)
3327 {
3328 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3329 struct f2fs_nm_info *nm_i = NM_I(sbi);
3330 unsigned char *version_bitmap;
3331 unsigned int nat_segs;
3332 int err;
3333
3334 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3335
3336 /* segment_count_nat counts both halves of each NAT segment pair, so divide by 2. */
3337 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3338 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3339 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3340
3341 /* unused nids: 0, node, meta (and root, which is counted as a valid node) */
3342 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3343 F2FS_RESERVED_NODE_NUM;
3344 nm_i->nid_cnt[FREE_NID] = 0;
3345 nm_i->nid_cnt[PREALLOC_NID] = 0;
3346 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3347 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3348 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3349 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3350
3351 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3352 INIT_LIST_HEAD(&nm_i->free_nid_list);
3353 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3354 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3355 INIT_LIST_HEAD(&nm_i->nat_entries);
3356 spin_lock_init(&nm_i->nat_list_lock);
3357
3358 mutex_init(&nm_i->build_lock);
3359 spin_lock_init(&nm_i->nid_list_lock);
3360 init_f2fs_rwsem(&nm_i->nat_tree_lock);
3361
3362 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3363 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3364 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3365 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3366 GFP_KERNEL);
3367 if (!nm_i->nat_bitmap)
3368 return -ENOMEM;
3369
3370 if (!test_opt(sbi, NAT_BITS))
3371 disable_nat_bits(sbi, true);
3372
3373 err = __get_nat_bitmaps(sbi);
3374 if (err)
3375 return err;
3376
3377 #ifdef CONFIG_F2FS_CHECK_FS
3378 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3379 GFP_KERNEL);
3380 if (!nm_i->nat_bitmap_mir)
3381 return -ENOMEM;
3382 #endif
3383
3384 return 0;
3385 }
3386
3387 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3388 {
3389 struct f2fs_nm_info *nm_i = NM_I(sbi);
3390 int i;
3391
3392 nm_i->free_nid_bitmap =
3393 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3394 nm_i->nat_blocks),
3395 GFP_KERNEL);
3396 if (!nm_i->free_nid_bitmap)
3397 return -ENOMEM;
3398
3399 for (i = 0; i < nm_i->nat_blocks; i++) {
3400 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3401 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3402 if (!nm_i->free_nid_bitmap[i])
3403 return -ENOMEM;
3404 }
3405
3406 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3407 GFP_KERNEL);
3408 if (!nm_i->nat_block_bitmap)
3409 return -ENOMEM;
3410
3411 nm_i->free_nid_count =
3412 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3413 nm_i->nat_blocks),
3414 GFP_KERNEL);
3415 if (!nm_i->free_nid_count)
3416 return -ENOMEM;
3417 return 0;
3418 }
3419
3420 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3421 {
3422 int err;
3423
3424 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3425 GFP_KERNEL);
3426 if (!sbi->nm_info)
3427 return -ENOMEM;
3428
3429 err = init_node_manager(sbi);
3430 if (err)
3431 return err;
3432
3433 err = init_free_nid_cache(sbi);
3434 if (err)
3435 return err;
3436
3437 /* load free nid status from nat_bits table */
3438 load_free_nid_bitmap(sbi);
3439
3440 return f2fs_build_free_nids(sbi, true, true);
3441 }
3442
3443 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3444 {
3445 struct f2fs_nm_info *nm_i = NM_I(sbi);
3446 struct free_nid *i, *next_i;
3447 void *vec[NAT_VEC_SIZE];
3448 struct nat_entry **natvec = (struct nat_entry **)vec;
3449 struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
3450 nid_t nid = 0;
3451 unsigned int found;
3452
3453 if (!nm_i)
3454 return;
3455
3456 /* destroy free nid list */
3457 spin_lock(&nm_i->nid_list_lock);
3458 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3459 __remove_free_nid(sbi, i, FREE_NID);
3460 spin_unlock(&nm_i->nid_list_lock);
3461 kmem_cache_free(free_nid_slab, i);
3462 spin_lock(&nm_i->nid_list_lock);
3463 }
3464 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3465 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3466 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3467 spin_unlock(&nm_i->nid_list_lock);
3468
3469 /* destroy nat cache */
3470 f2fs_down_write(&nm_i->nat_tree_lock);
3471 while ((found = __gang_lookup_nat_cache(nm_i,
3472 nid, NAT_VEC_SIZE, natvec))) {
3473 unsigned idx;
3474
3475 nid = nat_get_nid(natvec[found - 1]) + 1;
3476 for (idx = 0; idx < found; idx++) {
3477 spin_lock(&nm_i->nat_list_lock);
3478 list_del(&natvec[idx]->list);
3479 spin_unlock(&nm_i->nat_list_lock);
3480
3481 __del_from_nat_cache(nm_i, natvec[idx]);
3482 }
3483 }
3484 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3485
3486 /* destroy nat set cache */
3487 nid = 0;
3488 memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
3489 while ((found = __gang_lookup_nat_set(nm_i,
3490 nid, NAT_VEC_SIZE, setvec))) {
3491 unsigned idx;
3492
3493 nid = setvec[found - 1]->set + 1;
3494 for (idx = 0; idx < found; idx++) {
3495 /* entry_cnt can be nonzero if a cp_error occurred */
3496 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3497 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3498 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3499 }
3500 }
3501 f2fs_up_write(&nm_i->nat_tree_lock);
3502
3503 kvfree(nm_i->nat_block_bitmap);
3504 if (nm_i->free_nid_bitmap) {
3505 int i;
3506
3507 for (i = 0; i < nm_i->nat_blocks; i++)
3508 kvfree(nm_i->free_nid_bitmap[i]);
3509 kvfree(nm_i->free_nid_bitmap);
3510 }
3511 kvfree(nm_i->free_nid_count);
3512
3513 kfree(nm_i->nat_bitmap);
3514 kvfree(nm_i->nat_bits);
3515 #ifdef CONFIG_F2FS_CHECK_FS
3516 kfree(nm_i->nat_bitmap_mir);
3517 #endif
3518 sbi->nm_info = NULL;
3519 kfree(nm_i);
3520 }
3521
3522 int __init f2fs_create_node_manager_caches(void)
3523 {
3524 nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3525 sizeof(struct nat_entry));
3526 if (!nat_entry_slab)
3527 goto fail;
3528
3529 free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3530 sizeof(struct free_nid));
3531 if (!free_nid_slab)
3532 goto destroy_nat_entry;
3533
3534 nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3535 sizeof(struct nat_entry_set));
3536 if (!nat_entry_set_slab)
3537 goto destroy_free_nid;
3538
3539 fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3540 sizeof(struct fsync_node_entry));
3541 if (!fsync_node_entry_slab)
3542 goto destroy_nat_entry_set;
3543 return 0;
3544
3545 destroy_nat_entry_set:
3546 kmem_cache_destroy(nat_entry_set_slab);
3547 destroy_free_nid:
3548 kmem_cache_destroy(free_nid_slab);
3549 destroy_nat_entry:
3550 kmem_cache_destroy(nat_entry_slab);
3551 fail:
3552 return -ENOMEM;
3553 }
3554
3555 void f2fs_destroy_node_manager_caches(void)
3556 {
3557 kmem_cache_destroy(fsync_node_entry_slab);
3558 kmem_cache_destroy(nat_entry_set_slab);
3559 kmem_cache_destroy(free_nid_slab);
3560 kmem_cache_destroy(nat_entry_slab);
3561 }
3562