Lines Matching defs:sbi

33 struct f2fs_sb_info *sbi = data;
34 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
35 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
36 wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
55 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
62 if (f2fs_readonly(sbi->sb)) {
63 stat_other_skip_bggc_count(sbi);
69 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
71 stat_other_skip_bggc_count(sbi);
75 if (time_to_inject(sbi, FAULT_CHECKPOINT))
76 f2fs_stop_checkpoint(sbi, false,
79 if (!sb_start_write_trylock(sbi->sb)) {
80 stat_other_skip_bggc_count(sbi);
99 if (sbi->gc_mode == GC_URGENT_HIGH ||
100 sbi->gc_mode == GC_URGENT_MID) {
102 f2fs_down_write(&sbi->gc_lock);
107 f2fs_down_write(&sbi->gc_lock);
109 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
110 stat_other_skip_bggc_count(sbi);
114 if (!is_idle(sbi, GC_TIME)) {
116 f2fs_up_write(&sbi->gc_lock);
117 stat_io_skip_bggc_count(sbi);
121 if (f2fs_sb_has_blkzoned(sbi)) {
122 if (has_enough_free_blocks(sbi,
125 f2fs_up_write(&sbi->gc_lock);
132 if (need_to_boost_gc(sbi)) {
134 if (f2fs_sb_has_blkzoned(sbi))
140 stat_inc_gc_call_count(sbi, foreground ?
143 sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
147 if (foreground && !f2fs_sb_has_blkzoned(sbi))
155 if (f2fs_gc(sbi, &gc_control)) {
168 trace_f2fs_background_gc(sbi->sb, wait_ms,
169 prefree_segments(sbi), free_segments(sbi));
172 f2fs_balance_fs_bg(sbi, true);
174 if (sbi->gc_mode != GC_NORMAL) {
175 spin_lock(&sbi->gc_remaining_trials_lock);
176 if (sbi->gc_remaining_trials) {
177 sbi->gc_remaining_trials--;
178 if (!sbi->gc_remaining_trials)
179 sbi->gc_mode = GC_NORMAL;
181 spin_unlock(&sbi->gc_remaining_trials_lock);
183 sb_end_write(sbi->sb);
189 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
192 dev_t dev = sbi->sb->s_bdev->bd_dev;
194 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
203 if (f2fs_sb_has_blkzoned(sbi)) {
219 sbi->gc_thread = gc_th;
220 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
221 init_waitqueue_head(&sbi->gc_thread->fggc_wq);
222 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
228 sbi->gc_thread = NULL;
235 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
237 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
244 sbi->gc_thread = NULL;
247 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
252 if (sbi->am.atgc_enabled)
260 switch (sbi->gc_mode) {
278 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
281 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
289 p->gc_mode = select_gc_type(sbi, gc_type);
290 p->ofs_unit = SEGS_PER_SEC(sbi);
291 if (__is_large_section(sbi)) {
294 0, MAIN_SECS(sbi));
306 (sbi->gc_mode != GC_URGENT_HIGH) &&
308 p->max_search > sbi->max_victim_search)
309 p->max_search = sbi->max_victim_search;
312 if (f2fs_need_rand_seg(sbi))
313 p->offset = get_random_u32_below(MAIN_SECS(sbi) *
314 SEGS_PER_SEC(sbi));
318 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
321 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
326 return BLKS_PER_SEG(sbi);
332 return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
341 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
343 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
351 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
352 if (sec_usage_check(sbi, secno))
355 return GET_SEG_FROM_SEC(sbi, secno);
360 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
362 struct sit_info *sit_i = SIT_I(sbi);
367 unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);
369 mtime = f2fs_get_section_mtime(sbi, segno);
370 f2fs_bug_on(sbi, mtime == INVALID_MTIME);
371 vblocks = get_valid_blocks(sbi, segno, true);
374 u = BLKS_TO_SEGS(sbi, vblocks * 100);
388 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
393 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
396 (get_valid_blocks(sbi, segno, true) >=
397 CAP_BLKS_PER_SEC(sbi) * valid_thresh_ratio / 100))
402 return get_valid_blocks(sbi, segno, true);
404 return get_cb_cost(sbi, segno);
406 f2fs_bug_on(sbi, 1);
422 static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
438 f2fs_info(sbi, "broken victim_rbtree, "
449 static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
452 struct atgc_management *am = &sbi->am;
467 static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
470 struct atgc_management *am = &sbi->am;
484 static void __insert_victim_entry(struct f2fs_sb_info *sbi,
487 struct atgc_management *am = &sbi->am;
507 ve = __create_victim_entry(sbi, mtime, segno);
513 static void add_victim_entry(struct f2fs_sb_info *sbi,
516 struct sit_info *sit_i = SIT_I(sbi);
519 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
521 get_valid_blocks(sbi, segno, true) == 0)
525 mtime = f2fs_get_section_mtime(sbi, segno);
526 f2fs_bug_on(sbi, mtime == INVALID_MTIME);
542 __insert_victim_entry(sbi, mtime, segno);
545 static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
548 struct sit_info *sit_i = SIT_I(sbi);
549 struct atgc_management *am = &sbi->am;
557 unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
589 vblocks = get_valid_blocks(sbi, ve->segno, true);
590 f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
596 f2fs_bug_on(sbi, age + u >= UINT_MAX);
618 static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
621 struct sit_info *sit_i = SIT_I(sbi);
622 struct atgc_management *am = &sbi->am;
639 ve = __lookup_victim_entry(sbi, p->age);
652 vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
653 f2fs_bug_on(sbi, !vblocks);
656 if (vblocks == BLKS_PER_SEG(sbi))
682 static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
685 f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
688 atgc_lookup_victim(sbi, p);
690 atssr_lookup_victim(sbi, p);
692 f2fs_bug_on(sbi, 1);
695 static void release_victim_entry(struct f2fs_sb_info *sbi)
697 struct atgc_management *am = &sbi->am;
708 f2fs_bug_on(sbi, am->victim_count);
709 f2fs_bug_on(sbi, !list_empty(&am->victim_list));
712 static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
714 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
715 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
737 static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
739 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
741 if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
742 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
743 DIRTY_I(sbi)->pinned_secmap_cnt = 0;
745 DIRTY_I(sbi)->enable_pin_section = enable;
768 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
772 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
773 struct sit_info *sm = SIT_I(sbi);
783 last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
787 p.age_threshold = sbi->am.age_threshold;
790 if (has_enough_free_secs(sbi, 0, NR_PERSISTENT_LOG))
791 valid_thresh_ratio = sbi->gc_thread->valid_thresh_ratio;
795 select_policy(sbi, gc_type, type, &p);
798 p.min_cost = get_max_cost(sbi, &p);
804 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
807 if (!get_valid_blocks(sbi, *result, false)) {
812 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) {
817 clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap);
826 if (__is_large_section(sbi) && p.alloc_mode == LFS) {
827 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
828 p.min_segno = sbi->next_victim_seg[BG_GC];
830 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
834 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
835 p.min_segno = sbi->next_victim_seg[FG_GC];
837 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
844 p.min_segno = check_bg_victims(sbi);
882 secno = GET_SEC_FROM_SEG(sbi, segno);
884 if (sec_usage_check(sbi, secno))
888 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
894 if (get_ckpt_valid_blocks(sbi, segno, true))
902 if (!f2fs_segment_has_free_slot(sbi, segno))
914 add_victim_entry(sbi, &p, segno);
918 cost = get_gc_cost(sbi, segno, &p, valid_thresh_ratio);
932 (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
939 lookup_victim_by_age(sbi, &p);
940 release_victim_entry(sbi);
954 secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
956 sbi->cur_victim_sec = secno;
965 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
966 sbi->cur_victim_sec,
967 prefree_segments(sbi), free_segments(sbi));
1011 static int check_valid_map(struct f2fs_sb_info *sbi,
1014 struct sit_info *sit_i = SIT_I(sbi);
1019 sentry = get_seg_entry(sbi, segno);
1030 static int gc_node_segment(struct f2fs_sb_info *sbi,
1039 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1041 start_addr = START_BLOCK(sbi, segno);
1047 atomic_inc(&sbi->wb_sync_req[NODE]);
1056 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1059 if (check_valid_map(sbi, segno, off) == 0)
1063 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1069 f2fs_ra_node_page(sbi, nid);
1074 node_folio = f2fs_get_node_folio(sbi, nid);
1079 if (check_valid_map(sbi, segno, off) == 0) {
1084 if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1097 stat_inc_node_blk_count(sbi, 1, gc_type);
1104 atomic_dec(&sbi->wb_sync_req[NODE]);
1137 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1148 node_folio = f2fs_get_node_folio(sbi, nid);
1152 if (f2fs_get_node_info(sbi, nid, dni, false)) {
1158 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1160 set_sbi_flag(sbi, SBI_NEED_FSCK);
1163 if (f2fs_check_nid_range(sbi, dni->ino)) {
1177 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1189 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1190 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1192 if (unlikely(check_valid_map(sbi, segno, offset))) {
1193 if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1194 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1196 set_sbi_flag(sbi, SBI_NEED_FSCK);
1207 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1213 .sbi = sbi,
1230 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1248 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1266 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1280 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1281 f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1301 .sbi = F2FS_I_SB(inode),
1316 bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1317 int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1318 (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1354 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1363 f2fs_down_write(&fio.sbi->io_order_lock);
1365 mfolio = f2fs_grab_cache_folio(META_MAPPING(fio.sbi),
1382 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1384 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1399 err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1407 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1421 f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1);
1425 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1434 f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1442 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1446 f2fs_up_write(&fio.sbi->io_order_lock);
1482 .sbi = F2FS_I_SB(inode),
1530 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1534 struct super_block *sb = sbi->sb;
1540 unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1542 start_addr = START_BLOCK(sbi, segno);
1559 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1560 (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1561 CAP_BLKS_PER_SEC(sbi)))
1564 if (check_valid_map(sbi, segno, off) == 0)
1568 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1574 f2fs_ra_node_page(sbi, nid);
1579 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1583 f2fs_ra_node_page(sbi, dni.ino);
1605 set_sbi_flag(sbi, SBI_NEED_FSCK);
1606 f2fs_err_ratelimited(sbi,
1622 sbi->skipped_gc_rwsem++;
1663 sbi->skipped_gc_rwsem++;
1668 sbi->skipped_gc_rwsem++;
1696 stat_inc_data_blk_count(sbi, 1, gc_type);
1706 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1709 struct sit_info *sit_i = SIT_I(sbi);
1713 ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
1719 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1726 unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1729 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1734 if (__is_large_section(sbi)) {
1735 sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1743 if (f2fs_sb_has_blkzoned(sbi))
1744 sec_end_segno -= SEGS_PER_SEC(sbi) -
1745 f2fs_usable_segs_in_sec(sbi);
1749 sbi->migration_window_granularity;
1751 if (f2fs_sb_has_blkzoned(sbi) &&
1752 !has_enough_free_blocks(sbi,
1753 sbi->gc_thread->boost_zoned_gc_percent))
1755 sbi->gc_thread->boost_gc_multiple;
1764 sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1767 if (__is_large_section(sbi))
1768 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1773 struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno++);
1779 sum_folio = filemap_get_folio(META_MAPPING(sbi),
1780 GET_SUM_BLOCK(sbi, segno));
1794 struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
1795 GET_SUM_BLOCK(sbi, segno));
1797 if (get_valid_blocks(sbi, segno, false) == 0)
1799 if (gc_type == BG_GC && __is_large_section(sbi) &&
1800 migrated >= sbi->migration_granularity)
1803 unlikely(f2fs_cp_error(sbi)))
1808 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1810 f2fs_stop_checkpoint(sbi, false,
1823 submitted += gc_node_segment(sbi, sum->entries, segno,
1826 submitted += gc_data_segment(sbi, sum->entries, gc_list,
1830 stat_inc_gc_seg_count(sbi, data_type, gc_type);
1831 sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1836 get_valid_blocks(sbi, segno, false) == 0)
1839 if (__is_large_section(sbi))
1840 sbi->next_victim_seg[gc_type] =
1848 f2fs_submit_merged_write(sbi, data_type);
1853 stat_inc_gc_sec_count(sbi, data_type, gc_type);
1858 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1872 trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1874 get_pages(sbi, F2FS_DIRTY_NODES),
1875 get_pages(sbi, F2FS_DIRTY_DENTS),
1876 get_pages(sbi, F2FS_DIRTY_IMETA),
1877 free_sections(sbi),
1878 free_segments(sbi),
1879 reserved_segments(sbi),
1880 prefree_segments(sbi));
1882 cpc.reason = __get_cp_reason(sbi);
1884 sbi->skipped_gc_rwsem = 0;
1885 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1889 if (unlikely(f2fs_cp_error(sbi))) {
1895 if (has_not_enough_free_secs(sbi, 0, 0)) {
1904 if (prefree_segments(sbi)) {
1905 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1906 ret = f2fs_write_checkpoint(sbi, &cpc);
1920 ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
1924 f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1925 f2fs_unpin_all_sections(sbi, false);
1931 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1939 if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
1948 sbi->cur_victim_sec = NULL_SEGNO;
1950 if (has_enough_free_secs(sbi, sec_freed, 0)) {
1956 if (sbi->skipped_gc_rwsem)
1961 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1962 ret = f2fs_write_checkpoint(sbi, &cpc);
1965 } else if (has_enough_free_secs(sbi, 0, 0)) {
1969 __get_secs_required(sbi, NULL, &upper_secs, NULL);
1975 if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1976 prefree_segments(sbi)) {
1977 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1978 ret = f2fs_write_checkpoint(sbi, &cpc);
1989 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1990 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1993 f2fs_unpin_all_sections(sbi, true);
1995 trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1996 get_pages(sbi, F2FS_DIRTY_NODES),
1997 get_pages(sbi, F2FS_DIRTY_DENTS),
1998 get_pages(sbi, F2FS_DIRTY_IMETA),
1999 free_sections(sbi),
2000 free_segments(sbi),
2001 reserved_segments(sbi),
2002 prefree_segments(sbi));
2004 f2fs_up_write(&sbi->gc_lock);
2025 static void init_atgc_management(struct f2fs_sb_info *sbi)
2027 struct atgc_management *am = &sbi->am;
2029 if (test_opt(sbi, ATGC) &&
2030 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
2043 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
2045 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
2048 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
2049 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
2050 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
2052 init_atgc_management(sbi);
2055 int f2fs_gc_range(struct f2fs_sb_info *sbi,
2062 if (unlikely(f2fs_cp_error(sbi)))
2065 for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
2071 if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno)))
2074 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
2077 if (!dry_run && get_valid_blocks(sbi, segno, true))
2080 !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2090 static int free_segment_range(struct f2fs_sb_info *sbi,
2100 MAIN_SECS(sbi) -= secs;
2101 start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2102 end = MAIN_SEGS(sbi) - 1;
2104 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2106 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2107 SIT_I(sbi)->last_victim[gc_mode] = 0;
2110 if (sbi->next_victim_seg[gc_type] >= start)
2111 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2112 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2116 err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2122 err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2126 stat_inc_cp_call_count(sbi, TOTAL_CALL);
2127 err = f2fs_write_checkpoint(sbi, &cpc);
2131 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2133 f2fs_err(sbi, "segno %u should be free but still inuse!",
2135 f2fs_bug_on(sbi, 1);
2138 MAIN_SECS(sbi) += secs;
2142 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2144 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2149 int segs = secs * SEGS_PER_SEC(sbi);
2151 f2fs_down_write(&sbi->sb_lock);
2162 (long long)SEGS_TO_BLKS(sbi, segs));
2163 if (f2fs_is_multi_device(sbi)) {
2164 int last_dev = sbi->s_ndevs - 1;
2172 f2fs_up_write(&sbi->sb_lock);
2175 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2177 int segs = secs * SEGS_PER_SEC(sbi);
2178 long long blks = SEGS_TO_BLKS(sbi, segs);
2180 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2182 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2183 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2184 MAIN_SECS(sbi) += secs;
2185 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2186 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2187 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2189 if (f2fs_is_multi_device(sbi)) {
2190 int last_dev = sbi->s_ndevs - 1;
2198 div_u64(blks, sbi->blocks_per_blkz);
2205 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2212 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2216 if (f2fs_is_multi_device(sbi)) {
2217 int last_dev = sbi->s_ndevs - 1;
2220 if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2226 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2233 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2234 f2fs_err(sbi, "Should run fsck to repair first.");
2238 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2239 f2fs_err(sbi, "Checkpoint should be enabled.");
2248 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2251 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2257 f2fs_lock_op(sbi);
2259 spin_lock(&sbi->stat_lock);
2260 if (shrunk_blocks + valid_user_blocks(sbi) +
2261 sbi->current_reserved_blocks + sbi->unusable_block_count +
2262 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2264 spin_unlock(&sbi->stat_lock);
2269 err = free_segment_range(sbi, secs, true);
2272 f2fs_unlock_op(sbi);
2273 f2fs_up_write(&sbi->gc_lock);
2279 err = freeze_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
2283 if (f2fs_readonly(sbi->sb)) {
2284 err = thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
2290 f2fs_down_write(&sbi->gc_lock);
2291 f2fs_down_write(&sbi->cp_global_sem);
2293 spin_lock(&sbi->stat_lock);
2294 if (shrunk_blocks + valid_user_blocks(sbi) +
2295 sbi->current_reserved_blocks + sbi->unusable_block_count +
2296 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2299 sbi->user_block_count -= shrunk_blocks;
2300 spin_unlock(&sbi->stat_lock);
2304 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2305 err = free_segment_range(sbi, secs, false);
2309 update_sb_metadata(sbi, -secs);
2311 err = f2fs_commit_super(sbi, false);
2313 update_sb_metadata(sbi, secs);
2317 update_fs_metadata(sbi, -secs);
2318 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2319 set_sbi_flag(sbi, SBI_IS_DIRTY);
2321 stat_inc_cp_call_count(sbi, TOTAL_CALL);
2322 err = f2fs_write_checkpoint(sbi, &cpc);
2324 update_fs_metadata(sbi, secs);
2325 update_sb_metadata(sbi, secs);
2326 f2fs_commit_super(sbi, false);
2329 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2331 set_sbi_flag(sbi, SBI_NEED_FSCK);
2332 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2334 spin_lock(&sbi->stat_lock);
2335 sbi->user_block_count += shrunk_blocks;
2336 spin_unlock(&sbi->stat_lock);
2339 f2fs_up_write(&sbi->cp_global_sem);
2340 f2fs_up_write(&sbi->gc_lock);
2341 thaw_super(sbi->sb, FREEZE_HOLDER_KERNEL, NULL);
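
The fragments above all come from the f2fs garbage-collection code (fs/f2fs/gc.c in the Linux kernel) and trace how the per-filesystem context pointer `sbi` (struct f2fs_sb_info) is threaded through the GC path: the background kthread loop (gc_thread_func), thread start/stop (f2fs_start_gc_thread, f2fs_stop_gc_thread), victim selection (select_policy, f2fs_get_victim and the ATGC helpers), the per-segment migration work (gc_node_segment, gc_data_segment, do_garbage_collect), the top-level f2fs_gc() entry point, and the resize/shrink path.

As a rough illustration of the thread-lifecycle pattern those fragments outline, here is a minimal sketch in kernel-module style. It is not the f2fs implementation: demo_sb_info, demo_gc_pass(), demo_start_gc_thread(), DEMO_GC_INTERVAL_MS and the plain mutex standing in for f2fs's gc_lock rwsem are all placeholders invented for this sketch; only the overall shape (a private context owning a kthread, a wait queue used to kick it, a timed wait, skip conditions, a trylock, one GC pass per wakeup, kthread_stop() on teardown) follows the listing.

/*
 * Hypothetical sketch, not fs/f2fs/gc.c: a private "sbi"-style context that
 * owns one background GC kthread, a wait queue used to kick it, and a lock
 * taken around each GC pass.  All demo_* names are invented for this sketch.
 */
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#define DEMO_GC_INTERVAL_MS	30000	/* assumed idle wakeup period */

struct demo_sb_info {
	struct task_struct	*gc_task;	/* like sbi->gc_thread->f2fs_gc_task */
	wait_queue_head_t	gc_wait_queue;	/* like gc_wait_queue_head */
	struct mutex		gc_lock;	/* stands in for the gc_lock rwsem */
	bool			gc_wake;	/* set by callers needing urgent GC */
};

/* placeholder for the real work done by f2fs_gc() in the listing */
static int demo_gc_pass(struct demo_sb_info *sbi)
{
	return 0;
}

static int demo_gc_thread_func(void *data)
{
	struct demo_sb_info *sbi = data;

	while (!kthread_should_stop()) {
		/* sleep until woken explicitly or until the interval expires */
		wait_event_interruptible_timeout(sbi->gc_wait_queue,
				sbi->gc_wake || kthread_should_stop(),
				msecs_to_jiffies(DEMO_GC_INTERVAL_MS));
		if (kthread_should_stop())
			break;

		/*
		 * Skip-conditions stage: the real thread also bails out on a
		 * read-only or frozen superblock and when the fs is not idle.
		 */
		if (!mutex_trylock(&sbi->gc_lock))
			continue;

		sbi->gc_wake = false;
		demo_gc_pass(sbi);
		mutex_unlock(&sbi->gc_lock);
	}
	return 0;
}

/* like f2fs_start_gc_thread(): init the waitqueue, then kthread_run() */
static int demo_start_gc_thread(struct demo_sb_info *sbi)
{
	init_waitqueue_head(&sbi->gc_wait_queue);
	mutex_init(&sbi->gc_lock);
	sbi->gc_wake = false;

	sbi->gc_task = kthread_run(demo_gc_thread_func, sbi, "demo_gc");
	if (IS_ERR(sbi->gc_task)) {
		int err = PTR_ERR(sbi->gc_task);

		sbi->gc_task = NULL;	/* clear the pointer on failure */
		return err;
	}
	return 0;
}

/* like f2fs_stop_gc_thread(): kthread_stop(), then clear the pointer */
static void demo_stop_gc_thread(struct demo_sb_info *sbi)
{
	if (!sbi->gc_task)
		return;
	kthread_stop(sbi->gc_task);
	sbi->gc_task = NULL;
}

In this sketch a filesystem would call demo_start_gc_thread() at mount time and demo_stop_gc_thread() at unmount, which mirrors where the listing shows f2fs_start_gc_thread() allocating gc_th and kthread_run()-ing gc_thread_func with sbi as its argument. The real gc_thread_func layers many policy checks on top of this skeleton (read-only and frozen superblocks, fault injection, urgent GC modes, zoned-device boosting, the gc_remaining_trials counter), but they all hang off the same sbi pointer passed to kthread_run(), which is why nearly every line in the listing dereferences it.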