// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

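/*
 * Background GC thread: sleeps between gc_th->min_sleep_time and
 * gc_th->max_sleep_time, shortening the interval when GC should be boosted
 * (or when gc_mode is urgent) and lengthening it when the filesystem is
 * busy, frozen or read-only. It can also be kicked explicitly via gc_wake,
 * or by foreground waiters on fggc_wq when the GC_MERGE option is set.
 */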
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false,
		.one_time = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false, gc_boost = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq)) {
			foreground = true;
			gc_control.one_time = false;
		} else if (f2fs_sb_has_blkzoned(sbi)) {
			gc_control.one_time = true;
		}

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions, so wait a while to let
		 * dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write_trace(&sbi->gc_lock, &gc_control.lc);
			goto do_gc;
		} else if (!f2fs_down_write_trylock_trace(&sbi->gc_lock,
					&gc_control.lc)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write_trace(&sbi->gc_lock, &gc_control.lc);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
						gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write_trace(&sbi->gc_lock,
							&gc_control.lc);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_boost = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
				(gc_boost && gc_th->boost_gc_greedy);

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground && !f2fs_sb_has_blkzoned(sbi))
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;
	gc_th->boost_gc_multiple = BOOST_GC_MULTIPLE;
	gc_th->boost_gc_greedy = GC_GREEDY;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	set_user_nice(gc_th->f2fs_gc_task,
			PRIO_TO_NICE(sbi->critical_task_priority));
	return 0;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
	case GC_URGENT_LOW:
	case GC_URGENT_MID:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

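/*
 * Set up the victim selection policy: SSR/AT_SSR allocation scans dirty
 * segments greedily in segment units, while LFS GC scans in section units
 * using the mode picked by select_gc_type() above. p->max_search bounds
 * how many candidates one pass may examine, and p->offset is where the
 * scan starts (randomized when f2fs_need_rand_seg() is true).
 */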
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR || p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range: all dirty segments should be
	 * searchable in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select from the beginning (hot/small space) first */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before. Such sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

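/*
 * Cost-benefit victim cost, following the classic LFS cleaning policy:
 * with utilization u (percent of valid blocks) and age normalized to
 * [0, 100], the benefit/cost value is proportional to
 * age * (100 - u) / (100 + u). The result is subtracted from UINT_MAX so
 * that a smaller return value means a better victim (older and emptier
 * sections win).
 */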
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p,
			unsigned int valid_thresh_ratio)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* one-time GC skips sections that are still mostly valid */
	if (p->one_time_gc && (valid_thresh_ratio < 100) &&
			(get_valid_blocks(sbi, segno, true) >=
			CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}

static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}

static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}

static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up rb tree to find parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}

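/*
 * ATGC cost: a weighted sum of normalized age and free-space ratio, each
 * scaled by "accu" so the arithmetic stays within 64 bits. With the
 * age_weight implied by the comments below (60), age gets 60% of the
 * weight and the free-space ratio the remaining 40%. Only up to
 * dirty_threshold candidates are examined per invocation.
 */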
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
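/*
 * The two "stages" below implement that window: stage 0 walks backward
 * (rb_prev) from the entry closest to the target age, stage 1 walks
 * forward (rb_next), each visiting at most dirty_threshold entries.
 */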
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage++ == 0)
			goto next_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}

	if (stage++ == 0)
		goto next_stage;
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment and does not
 * remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment with
 * minimum valid blocks and removes it from the dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p = {0};
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	unsigned int valid_thresh_ratio = 100;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	if (one_time) {
		p.one_time_gc = one_time;
		if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
			valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
	}

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
			ret = -EBUSY;
			goto out;
		}
		if (gc_type == FG_GC)
			clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
		p.min_segno = *result;
		goto got_result;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip an invalid segno (one that failed the block validity
		 * check during GC) to avoid an endless GC loop in such cases
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim should have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which may be filled by
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If it is valid, the node is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
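/*
 * The segment is scanned in three phases: phase 0 reads ahead the NAT
 * blocks covering the summary's nids, phase 1 reads ahead the node pages
 * themselves, and phase 2 re-validates each block and actually moves it
 * via f2fs_move_node_folio().
 */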
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type,
		struct blk_plug *plug)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct folio *node_folio;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
		if (IS_ERR(node_folio))
			continue;

		/* block may become invalid during f2fs_get_node_folio */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_folio_put(node_folio, true);
			continue;
		}

		err = f2fs_move_node_folio(node_folio, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3) {
		blk_finish_plug(plug);
		blk_start_plug(plug);
		goto next_step;
	}

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass only node offsets that indicate direct
 * node blocks. If a node offset pointing to another type of node block,
 * such as an indirect or double indirect node block, is given, it must be
 * a caller's bug.
 */
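/*
 * Worked example: node_ofs 1 and 2 are the two direct node blocks right
 * after the inode, giving bidx 0 and 1. node_ofs 3 is the first indirect
 * node, so the first direct node under it has node_ofs 4 and
 * dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0, giving bidx = 4 - 2 - 0 = 2.
 * The returned block index then skips the ADDRS_PER_INODE() in-inode
 * addresses plus bidx direct blocks of ADDRS_PER_BLOCK() addresses each.
 */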
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct folio *node_folio;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_folio = f2fs_get_node_folio(sbi, nid, NODE_TYPE_REGULAR);
	if (IS_ERR(node_folio))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_folio_put(node_folio, true);
		return false;
	}

	if (IS_INODE(node_folio)) {
		base = offset_in_addr(F2FS_INODE(node_folio));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_folio_put(node_folio, true);
		return false;
	}

	*nofs = ofs_of_node(node_folio);
	source_blkaddr = data_blkaddr(NULL, node_folio, ofs_in_node);
	f2fs_folio_put(node_folio, true);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

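/*
 * Read ahead one data block into the meta-inode mapping. This is used,
 * presumably, for inodes whose data must be migrated through the meta
 * inode (see f2fs_meta_inode_gc_required() in the caller), so that
 * phase 3 of gc_data_segment() can warm the cache before the actual move.
 */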
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct dnode_of_data dn;
	struct folio *folio, *efolio;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	int err;

	folio = f2fs_grab_cache_folio(mapping, index, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_folio;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_folio;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_folio;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_folio;
	}
got_it:
	/* read folio */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	efolio = f2fs_filemap_get_folio(META_MAPPING(sbi), dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(efolio)) {
		err = PTR_ERR(efolio);
		goto put_folio;
	}

	fio.encrypted_page = &efolio->page;

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, false);
	f2fs_folio_put(folio, true);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, true);
put_folio:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
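/*
 * The flow below: grab the data folio to pin and lock the block, read the
 * source block into a META_MAPPING folio, allocate a new block address
 * (CURSEG_ALL_DATA_ATGC when ATGC applies, else CURSEG_COLD_DATA), copy
 * the payload into the target meta folio, and write it back synchronously
 * before updating the dnode with the new address.
 */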
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = 0,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct folio *folio, *mfolio, *efolio;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	folio = f2fs_grab_cache_folio(mapping, bidx, false);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush
	 */
	f2fs_folio_wait_writeback(folio, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.folio = folio;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (IS_ERR(mfolio)) {
		err = PTR_ERR(mfolio);
		goto up_out;
	}

	fio.encrypted_page = folio_file_page(mfolio, fio.old_blkaddr);

	/* read source block in mfolio */
	if (!folio_test_uptodate(mfolio)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		folio_lock(mfolio);
		if (unlikely(!is_meta_folio(mfolio) ||
					!folio_test_uptodate(mfolio))) {
			err = -EIO;
			f2fs_folio_put(mfolio, true);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);
	if (err) {
		f2fs_folio_put(mfolio, true);
		/* the filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	efolio = f2fs_filemap_get_folio(META_MAPPING(fio.sbi), newaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(efolio)) {
		err = PTR_ERR(efolio);
		f2fs_folio_put(mfolio, true);
		goto recover_block;
	}

	fio.encrypted_page = &efolio->page;

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				folio_address(mfolio), PAGE_SIZE);
	f2fs_folio_put(mfolio, true);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, true);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	if (!folio_test_uptodate(folio))
		__folio_set_dropbehind(folio);
	folio_unlock(folio);
	folio_end_dropbehind(folio);
	folio_put(folio);
	return err;
}

static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct folio *folio;
	int err = 0;

	folio = f2fs_get_lock_data_folio(inode, bidx, true);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(folio)) {
			err = -EAGAIN;
			goto out;
		}
		folio_mark_dirty(folio);
		folio_set_f2fs_gcing(folio);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.folio = folio,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = folio_test_dirty(folio);

retry:
		f2fs_folio_wait_writeback(folio, DATA, true, true);

		folio_mark_dirty(folio);
		if (folio_clear_dirty_for_io(folio)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		folio_set_f2fs_gcing(folio);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			folio_clear_f2fs_gcing(folio);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				folio_mark_dirty(folio);
		}
	}
out:
	f2fs_folio_put(folio, true);
	return err;
}

/*
 * This function tries to get the parent node of the victim data block and
 * checks the data block's validity. If the block is valid, it is copied
 * with cold status and the parent node is modified.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
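/*
 * Like gc_node_segment(), this runs in phases: 0 and 1 read ahead the NAT
 * entries and dnodes, phase 2 reads ahead the owning inodes, phase 3 grabs
 * the inodes and warms up their data pages, and phase 4 performs the move
 * via move_data_block() or move_data_page().
 */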
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate, struct blk_plug *plug)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Or, stop GC if the segment became fully valid due to a
		 * race condition with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			struct folio *data_folio;
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			if (f2fs_has_inline_data(inode)) {
				iput(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				f2fs_err_ratelimited(sbi,
					"inode %lx has both inline_data flag and "
					"data block, nid=%u, ofs_in_node=%u",
					inode->i_ino, dni.nid, ofs_in_node);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_meta_inode_gc_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_folio = f2fs_get_read_data_folio(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_folio)) {
				iput(inode);
				continue;
			}

			f2fs_folio_put(data_folio, false);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_meta_inode_gc_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_meta_inode_gc_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5) {
		blk_finish_plug(plug);
		blk_start_plug(plug);
		goto next_step;
	}

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type, bool one_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
				LFS, 0, one_time);
	up_write(&sit_i->sentry_lock);
	return ret;
}

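/*
 * Garbage-collect one victim section starting at start_segno: reference
 * the relevant SSA summary blocks, then migrate node or data segments
 * within [start_segno, end_segno). For FG_GC the return value is the
 * number of segments that ended up with zero valid blocks.
 */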
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate, bool one_time)
{
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	unsigned int sec_end_segno;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0, sum_blk_cnt;

	if (__is_large_section(sbi)) {
		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

		/*
		 * zone-capacity can be less than zone-size in zoned devices,
		 * resulting in fewer usable segments in the zone than
		 * expected, so calculate the end segno of the zone that can
		 * be garbage collected
		 */
		if (f2fs_sb_has_blkzoned(sbi))
			sec_end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi);

		if (gc_type == BG_GC || one_time) {
			unsigned int window_granularity =
				sbi->migration_window_granularity;

			if (f2fs_sb_has_blkzoned(sbi) &&
					!has_enough_free_blocks(sbi,
					sbi->gc_thread->boost_zoned_gc_percent))
				window_granularity *=
					sbi->gc_thread->boost_gc_multiple;

			end_segno = start_segno + window_granularity;
		}

		if (end_segno > sec_end_segno)
			end_segno = sec_end_segno;
	}

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	segno = rounddown(segno, sbi->sums_per_block);
	sum_blk_cnt = DIV_ROUND_UP(end_segno - segno, sbi->sums_per_block);
	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sum_blk_cnt, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno);

		segno += sbi->sums_per_block;
		if (IS_ERR(sum_folio)) {
			int err = PTR_ERR(sum_folio);

			end_segno = segno - sbi->sums_per_block;
			segno = rounddown(start_segno, sbi->sums_per_block);
			while (segno < end_segno) {
				sum_folio = filemap_get_folio(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				folio_put_refs(sum_folio, 2);
				segno += sbi->sums_per_block;
			}
			return err;
		}
		folio_unlock(sum_folio);
	}

	blk_start_plug(&plug);

	segno = start_segno;
	while (segno < end_segno) {
		unsigned int cur_segno;

		/* find segment summary of victim */
		struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		unsigned int block_end_segno =
			rounddown(segno, sbi->sums_per_block) +
			sbi->sums_per_block;

		if (block_end_segno > end_segno)
			block_end_segno = end_segno;

		if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
			f2fs_err(sbi, "%s: segment %u is used by log",
					__func__, segno);
			f2fs_bug_on(sbi, 1);
			goto next_block;
		}

		if (!folio_test_uptodate(sum_folio) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next_block;

		for (cur_segno = segno; cur_segno < block_end_segno;
							cur_segno++) {
			struct f2fs_summary_block *sum;

			if (get_valid_blocks(sbi, cur_segno, false) == 0)
				goto freed;
			if (gc_type == BG_GC && __is_large_section(sbi) &&
					migrated >= sbi->migration_granularity)
				continue;

			sum = SUM_BLK_PAGE_ADDR(sbi, sum_folio, cur_segno);
			if (type != GET_SUM_TYPE(sum_footer(sbi, sum))) {
				f2fs_err(sbi, "Inconsistent segment (%u) type "
						"[%d, %d] in SSA and SIT",
						cur_segno, type,
						GET_SUM_TYPE(
							sum_footer(sbi, sum)));
				f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_CORRUPTED_SUMMARY);
				continue;
			}

			/*
			 * this is to avoid deadlock:
			 * - lock_page(sum_page)         - f2fs_replace_block
			 *  - check_valid_map()            - down_write(sentry_lock)
			 *   - down_read(sentry_lock)     - change_curseg()
			 *                                  - lock_page(sum_page)
			 */
			if (type == SUM_TYPE_NODE)
				submitted += gc_node_segment(sbi, sum->entries,
						cur_segno, gc_type, &plug);
			else
				submitted += gc_data_segment(sbi, sum->entries,
						gc_list, cur_segno,
						gc_type, force_migrate, &plug);

			stat_inc_gc_seg_count(sbi, data_type, gc_type);
			sbi->gc_reclaimed_segs[sbi->gc_mode]++;
			migrated++;

freed:
			if (gc_type == FG_GC &&
					get_valid_blocks(sbi, cur_segno, false) == 0)
				seg_freed++;

			if (__is_large_section(sbi))
				sbi->next_victim_seg[gc_type] =
					(cur_segno + 1 < sec_end_segno) ?
						cur_segno + 1 : NULL_SEGNO;
		}
next_block:
		folio_put_refs(sum_folio, 2);
		segno = block_end_segno;
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}
1907
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;
		gc_control->one_time = false;

		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. Then, we secure free segments which don't need
		 * FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
	if (ret) {
		/* allow searching for a victim in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks,
				gc_control->one_time);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_control->one_time)
		goto stop;

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
					total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		/*
		 * Once lock contention has caused more than
		 * MAX_SKIP_GC_COUNT skipped rounds and at least half of all
		 * rounds were skipped, stop retrying and fall back to a
		 * checkpoint.
		 */
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	upper_secs = __get_secs_required(sbi);

	/*
	 * Write a checkpoint to reclaim prefree segments.
	 * We need three extra sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
			prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write_trace(&sbi->gc_lock, &gc_control->lc);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}

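/* slab cache for the victim_entry objects used during ATGC victim selection */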
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

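/*
 * Enable age-threshold GC (ATGC) only when the ATGC mount option is set
 * and the filesystem has been in use long enough, then initialize the
 * candidate tree/list and the default victim-selection parameters.
 */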
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
	    SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

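/* set mount-time GC defaults: pinned-file threshold, victim hint, ATGC */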
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* assign the warm/cold data area to the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

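/*
 * Migrate all valid blocks in [start_seg, end_seg] with FG_GC, forcing
 * block migration.  Empty and currently-open sections are skipped.  In
 * dry-run mode, stop once dry_run_sections sections have been emptied
 * rather than failing on segments that still hold valid blocks.
 */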
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	stat_inc_gc_call_count(sbi, FOREGROUND);
	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		/*
		 * avoid migrating an empty section, as it can be allocated
		 * by an active log in parallel.
		 */
		if (!get_valid_blocks(sbi, segno, true))
			continue;

		if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
			continue;

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
				!get_valid_blocks(sbi, segno, true) &&
				--gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}

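/*
 * Evacuate the last @secs sections of the main area for shrinking:
 * temporarily carve them out of MAIN_SECS(), move the active logs below
 * the carve-out, GC all valid blocks out of the range, and (unless this
 * is a dry run) verify via checkpoint that the range ended up unused.
 */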
static int free_segment_range(struct f2fs_sb_info *sbi,
					unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* do GC to move out valid blocks in the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
				next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

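/*
 * Fold a resize delta of @secs sections (negative when shrinking) into
 * the on-disk super block counters, including the last device's segment
 * count on multi-device setups, under sb_lock.
 */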
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

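/*
 * Mirror the same section delta into the in-memory metadata: segment,
 * section and free-space counters, the checkpoint's user block count,
 * and the last device's geometry on multi-device setups.
 */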
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
			le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	if (sbi->allocate_section_hint > MAIN_SECS(sbi))
		sbi->allocate_section_hint = MAIN_SECS(sbi);
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		sbi->allocate_section_hint = FDEV(0).total_segments /
							SEGS_PER_SEC(sbi);

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}

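/*
 * Shrink the filesystem to @block_count blocks.  Only shrinking is
 * supported: the new size must be section-aligned and no larger than
 * the current size.  A dry-run pass first validates the range with GC
 * under gc_lock and the CP lock; the real pass then runs with the super
 * block frozen and rolls the sb/fs metadata back if any step fails.
 */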
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	struct f2fs_lock_context lc;
	struct f2fs_lock_context glc;
	struct f2fs_lock_context clc;
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
				old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock_trace(&sbi->gc_lock, &glc)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi, &lc);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi, &lc);
	f2fs_up_write_trace(&sbi->gc_lock, &glc);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write_trace(&sbi->gc_lock, &glc);
	f2fs_down_write_trace(&sbi->cp_global_sem, &clc);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write_trace(&sbi->cp_global_sem, &clc);
	f2fs_up_write_trace(&sbi->gc_lock, &glc);
	thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
	return err;
}