Lines Matching full:sbi

69 int f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned long rate,  in f2fs_build_fault_attr()  argument
72 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; in f2fs_build_fault_attr()
90 f2fs_info(sbi, in f2fs_build_fault_attr()
278 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate, in f2fs_printk() argument
292 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); in f2fs_printk()
295 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); in f2fs_printk()
339 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) in limit_reserve_root() argument
341 block_t limit = min((sbi->user_block_count >> 3), in limit_reserve_root()
342 sbi->user_block_count - sbi->reserved_blocks); in limit_reserve_root()
345 if (test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
346 F2FS_OPTION(sbi).root_reserved_blocks > limit) { in limit_reserve_root()
347 F2FS_OPTION(sbi).root_reserved_blocks = limit; in limit_reserve_root()
348 f2fs_info(sbi, "Reduce reserved blocks for root = %u", in limit_reserve_root()
349 F2FS_OPTION(sbi).root_reserved_blocks); in limit_reserve_root()
351 if (!test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
352 (!uid_eq(F2FS_OPTION(sbi).s_resuid, in limit_reserve_root()
354 !gid_eq(F2FS_OPTION(sbi).s_resgid, in limit_reserve_root()
356 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", in limit_reserve_root()
358 F2FS_OPTION(sbi).s_resuid), in limit_reserve_root()
360 F2FS_OPTION(sbi).s_resgid)); in limit_reserve_root()
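
The limit_reserve_root() hits above spell out the clamp applied to the reserve_root mount option: root may reserve at most one eighth of the user blocks, and never more than what is left after the filesystem's own reserve. A minimal sketch of that clamp, assembled from the fields visible in the matched lines (the surrounding declarations and the kernel's min() are assumed):

    /* Sketch of the reserve_root clamp seen in limit_reserve_root(). */
    static void clamp_root_reserve(struct f2fs_sb_info *sbi)
    {
            block_t limit = min(sbi->user_block_count >> 3,
                                sbi->user_block_count - sbi->reserved_blocks);

            if (test_opt(sbi, RESERVE_ROOT) &&
                F2FS_OPTION(sbi).root_reserved_blocks > limit)
                    F2FS_OPTION(sbi).root_reserved_blocks = limit;
    }
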
363 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) in adjust_unusable_cap_perc() argument
365 if (!F2FS_OPTION(sbi).unusable_cap_perc) in adjust_unusable_cap_perc()
368 if (F2FS_OPTION(sbi).unusable_cap_perc == 100) in adjust_unusable_cap_perc()
369 F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; in adjust_unusable_cap_perc()
371 F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * in adjust_unusable_cap_perc()
372 F2FS_OPTION(sbi).unusable_cap_perc; in adjust_unusable_cap_perc()
374 f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", in adjust_unusable_cap_perc()
375 F2FS_OPTION(sbi).unusable_cap, in adjust_unusable_cap_perc()
376 F2FS_OPTION(sbi).unusable_cap_perc); in adjust_unusable_cap_perc()
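
adjust_unusable_cap_perc() turns the checkpoint=disable percentage into an absolute block budget: 100 maps to the whole user block count, any other value to (user_block_count / 100) * unusable_cap_perc. A worked example with made-up numbers:

    /* Assuming user_block_count = 1,000,000:
     *   unusable_cap_perc = 25  ->  unusable_cap = (1,000,000 / 100) * 25 = 250,000 blocks
     *   unusable_cap_perc = 100 ->  unusable_cap = 1,000,000 blocks
     */
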
389 static int f2fs_set_qf_name(struct f2fs_sb_info *sbi, int qtype, in f2fs_set_qf_name() argument
392 struct super_block *sb = sbi->sb; in f2fs_set_qf_name()
396 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
397 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_set_qf_name()
400 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_set_qf_name()
401 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); in f2fs_set_qf_name()
407 f2fs_err(sbi, "Not enough memory for storing quotafile name"); in f2fs_set_qf_name()
410 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
411 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) in f2fs_set_qf_name()
414 f2fs_err(sbi, "%s quota file already specified", in f2fs_set_qf_name()
419 f2fs_err(sbi, "quotafile must be on filesystem root"); in f2fs_set_qf_name()
422 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; in f2fs_set_qf_name()
423 set_opt(sbi, QUOTA); in f2fs_set_qf_name()
430 static int f2fs_clear_qf_name(struct f2fs_sb_info *sbi, int qtype) in f2fs_clear_qf_name() argument
432 struct super_block *sb = sbi->sb; in f2fs_clear_qf_name()
434 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_clear_qf_name()
435 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_clear_qf_name()
438 kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); in f2fs_clear_qf_name()
439 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; in f2fs_clear_qf_name()
443 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) in f2fs_check_quota_options() argument
450 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { in f2fs_check_quota_options()
451 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement."); in f2fs_check_quota_options()
454 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || in f2fs_check_quota_options()
455 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || in f2fs_check_quota_options()
456 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { in f2fs_check_quota_options()
457 if (test_opt(sbi, USRQUOTA) && in f2fs_check_quota_options()
458 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_check_quota_options()
459 clear_opt(sbi, USRQUOTA); in f2fs_check_quota_options()
461 if (test_opt(sbi, GRPQUOTA) && in f2fs_check_quota_options()
462 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_check_quota_options()
463 clear_opt(sbi, GRPQUOTA); in f2fs_check_quota_options()
465 if (test_opt(sbi, PRJQUOTA) && in f2fs_check_quota_options()
466 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_check_quota_options()
467 clear_opt(sbi, PRJQUOTA); in f2fs_check_quota_options()
469 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || in f2fs_check_quota_options()
470 test_opt(sbi, PRJQUOTA)) { in f2fs_check_quota_options()
471 f2fs_err(sbi, "old and new quota format mixing"); in f2fs_check_quota_options()
475 if (!F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
476 f2fs_err(sbi, "journaled quota format not specified"); in f2fs_check_quota_options()
481 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
482 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); in f2fs_check_quota_options()
483 F2FS_OPTION(sbi).s_jquota_fmt = 0; in f2fs_check_quota_options()
489 static int f2fs_set_test_dummy_encryption(struct f2fs_sb_info *sbi, in f2fs_set_test_dummy_encryption() argument
499 &F2FS_OPTION(sbi).dummy_enc_policy; in f2fs_set_test_dummy_encryption()
503 f2fs_warn(sbi, "test_dummy_encryption option not supported"); in f2fs_set_test_dummy_encryption()
507 if (!f2fs_sb_has_encrypt(sbi)) { in f2fs_set_test_dummy_encryption()
508 f2fs_err(sbi, "Encrypt feature is off"); in f2fs_set_test_dummy_encryption()
519 f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); in f2fs_set_test_dummy_encryption()
526 f2fs_warn(sbi, in f2fs_set_test_dummy_encryption()
529 f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", in f2fs_set_test_dummy_encryption()
532 f2fs_warn(sbi, "Error processing option \"%s\" [%d]", in f2fs_set_test_dummy_encryption()
536 f2fs_warn(sbi, "Test dummy encryption mode enabled"); in f2fs_set_test_dummy_encryption()
541 static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, in is_compress_extension_exist() argument
549 ext = F2FS_OPTION(sbi).extensions; in is_compress_extension_exist()
550 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in is_compress_extension_exist()
552 ext = F2FS_OPTION(sbi).noextensions; in is_compress_extension_exist()
553 ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in is_compress_extension_exist()
571 static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi) in f2fs_test_compress_extension() argument
577 ext = F2FS_OPTION(sbi).extensions; in f2fs_test_compress_extension()
578 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in f2fs_test_compress_extension()
579 noext = F2FS_OPTION(sbi).noextensions; in f2fs_test_compress_extension()
580 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in f2fs_test_compress_extension()
587 f2fs_info(sbi, "Don't allow the nocompress extension specifies all files"); in f2fs_test_compress_extension()
592 …f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension… in f2fs_test_compress_extension()
602 static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_lz4hc_level() argument
608 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
615 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_lz4hc_level()
622 f2fs_info(sbi, "invalid lz4hc compress level: %d", level); in f2fs_set_lz4hc_level()
626 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_lz4hc_level()
630 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
633 f2fs_info(sbi, "kernel doesn't support lz4hc compression"); in f2fs_set_lz4hc_level()
640 static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_zstd_level() argument
646 F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; in f2fs_set_zstd_level()
653 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_zstd_level()
661 f2fs_info(sbi, "do not support negative compress level: %d", level); in f2fs_set_zstd_level()
666 f2fs_info(sbi, "invalid zstd compress level: %d", level); in f2fs_set_zstd_level()
670 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_zstd_level()
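
Both level parsers above, f2fs_set_lz4hc_level() and f2fs_set_zstd_level(), accept the compress_algorithm value in the form <alg_name>:<compr_level> and store the numeric part in F2FS_OPTION(sbi).compress_level, falling back to a default when no level is given (0 for lz4hc, F2FS_ZSTD_DEFAULT_CLEVEL for zstd). A rough, hypothetical sketch of that split, using only ordinary kernel helpers (strlen(), kstrtoint()) and omitting the per-algorithm range checks shown in the matched lines:

    /* Hypothetical helper mirroring the "<alg_name>:<compr_level>" parsing. */
    static int parse_compress_level(struct f2fs_sb_info *sbi,
                                    const char *str, size_t alg_len)
    {
            int level;

            if (strlen(str) == alg_len) {
                    F2FS_OPTION(sbi).compress_level = 0;    /* no level given */
                    return 0;
            }
            if (str[alg_len] != ':') {
                    f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
                    return -EINVAL;
            }
            if (kstrtoint(str + alg_len + 1, 10, &level))
                    return -EINVAL;
            F2FS_OPTION(sbi).compress_level = level;
            return 0;
    }
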
676 static int parse_options(struct f2fs_sb_info *sbi, char *options, bool is_remount) in parse_options() argument
712 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in parse_options()
714 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
715 f2fs_warn(sbi, "zoned devices need bggc"); in parse_options()
719 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; in parse_options()
721 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; in parse_options()
729 set_opt(sbi, DISABLE_ROLL_FORWARD); in parse_options()
733 set_opt(sbi, NORECOVERY); in parse_options()
736 if (!f2fs_hw_support_discard(sbi)) { in parse_options()
737 f2fs_warn(sbi, "device does not support discard"); in parse_options()
740 set_opt(sbi, DISCARD); in parse_options()
743 if (f2fs_hw_should_discard(sbi)) { in parse_options()
744 f2fs_warn(sbi, "discard is required for zoned block devices"); in parse_options()
747 clear_opt(sbi, DISCARD); in parse_options()
751 f2fs_warn(sbi, "heap/no_heap options were deprecated"); in parse_options()
755 set_opt(sbi, XATTR_USER); in parse_options()
758 clear_opt(sbi, XATTR_USER); in parse_options()
761 set_opt(sbi, INLINE_XATTR); in parse_options()
764 clear_opt(sbi, INLINE_XATTR); in parse_options()
769 set_opt(sbi, INLINE_XATTR_SIZE); in parse_options()
770 F2FS_OPTION(sbi).inline_xattr_size = arg; in parse_options()
778 f2fs_info(sbi, "xattr options not supported"); in parse_options()
783 set_opt(sbi, POSIX_ACL); in parse_options()
786 clear_opt(sbi, POSIX_ACL); in parse_options()
791 f2fs_info(sbi, "acl options not supported"); in parse_options()
800 F2FS_OPTION(sbi).active_logs = arg; in parse_options()
803 set_opt(sbi, DISABLE_EXT_IDENTIFY); in parse_options()
806 set_opt(sbi, INLINE_DATA); in parse_options()
809 set_opt(sbi, INLINE_DENTRY); in parse_options()
812 clear_opt(sbi, INLINE_DENTRY); in parse_options()
815 set_opt(sbi, FLUSH_MERGE); in parse_options()
818 clear_opt(sbi, FLUSH_MERGE); in parse_options()
821 set_opt(sbi, NOBARRIER); in parse_options()
824 clear_opt(sbi, NOBARRIER); in parse_options()
827 set_opt(sbi, FASTBOOT); in parse_options()
830 set_opt(sbi, READ_EXTENT_CACHE); in parse_options()
833 if (f2fs_sb_has_device_alias(sbi)) { in parse_options()
834 f2fs_err(sbi, "device aliasing requires extent cache"); in parse_options()
837 clear_opt(sbi, READ_EXTENT_CACHE); in parse_options()
840 clear_opt(sbi, INLINE_DATA); in parse_options()
843 set_opt(sbi, DATA_FLUSH); in parse_options()
848 if (test_opt(sbi, RESERVE_ROOT)) { in parse_options()
849 f2fs_info(sbi, "Preserve previous reserve_root=%u", in parse_options()
850 F2FS_OPTION(sbi).root_reserved_blocks); in parse_options()
852 F2FS_OPTION(sbi).root_reserved_blocks = arg; in parse_options()
853 set_opt(sbi, RESERVE_ROOT); in parse_options()
861 f2fs_err(sbi, "Invalid uid value %d", arg); in parse_options()
864 F2FS_OPTION(sbi).s_resuid = uid; in parse_options()
871 f2fs_err(sbi, "Invalid gid value %d", arg); in parse_options()
874 F2FS_OPTION(sbi).s_resgid = gid; in parse_options()
882 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in parse_options()
884 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in parse_options()
886 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG; in parse_options()
888 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK; in parse_options()
899 if (f2fs_build_fault_attr(sbi, arg, in parse_options()
902 set_opt(sbi, FAULT_INJECTION); in parse_options()
908 if (f2fs_build_fault_attr(sbi, 0, arg)) in parse_options()
910 set_opt(sbi, FAULT_INJECTION); in parse_options()
915 f2fs_info(sbi, "fault injection options not supported"); in parse_options()
919 set_opt(sbi, LAZYTIME); in parse_options()
922 clear_opt(sbi, LAZYTIME); in parse_options()
927 set_opt(sbi, USRQUOTA); in parse_options()
930 set_opt(sbi, GRPQUOTA); in parse_options()
933 set_opt(sbi, PRJQUOTA); in parse_options()
936 ret = f2fs_set_qf_name(sbi, USRQUOTA, &args[0]); in parse_options()
941 ret = f2fs_set_qf_name(sbi, GRPQUOTA, &args[0]); in parse_options()
946 ret = f2fs_set_qf_name(sbi, PRJQUOTA, &args[0]); in parse_options()
951 ret = f2fs_clear_qf_name(sbi, USRQUOTA); in parse_options()
956 ret = f2fs_clear_qf_name(sbi, GRPQUOTA); in parse_options()
961 ret = f2fs_clear_qf_name(sbi, PRJQUOTA); in parse_options()
966 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; in parse_options()
969 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; in parse_options()
972 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; in parse_options()
975 clear_opt(sbi, QUOTA); in parse_options()
976 clear_opt(sbi, USRQUOTA); in parse_options()
977 clear_opt(sbi, GRPQUOTA); in parse_options()
978 clear_opt(sbi, PRJQUOTA); in parse_options()
995 f2fs_info(sbi, "quota operations not supported"); in parse_options()
1004 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in parse_options()
1006 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in parse_options()
1018 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in parse_options()
1020 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; in parse_options()
1022 F2FS_OPTION(sbi).fsync_mode = in parse_options()
1031 ret = f2fs_set_test_dummy_encryption(sbi, p, &args[0], in parse_options()
1038 set_opt(sbi, INLINECRYPT); in parse_options()
1040 f2fs_info(sbi, "inline encryption not supported"); in parse_options()
1048 F2FS_OPTION(sbi).unusable_cap_perc = arg; in parse_options()
1049 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1054 F2FS_OPTION(sbi).unusable_cap = arg; in parse_options()
1055 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1058 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1061 clear_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1064 set_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1067 clear_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1071 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1072 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1080 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1081 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1084 f2fs_info(sbi, "kernel doesn't support lzo compression"); in parse_options()
1088 ret = f2fs_set_lz4hc_level(sbi, name); in parse_options()
1093 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1096 f2fs_info(sbi, "kernel doesn't support lz4 compression"); in parse_options()
1100 ret = f2fs_set_zstd_level(sbi, name); in parse_options()
1105 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1108 f2fs_info(sbi, "kernel doesn't support zstd compression"); in parse_options()
1112 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1113 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1116 f2fs_info(sbi, "kernel doesn't support lzorle compression"); in parse_options()
1125 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1126 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1133 f2fs_err(sbi, in parse_options()
1137 F2FS_OPTION(sbi).compress_log_size = arg; in parse_options()
1140 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1141 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1148 ext = F2FS_OPTION(sbi).extensions; in parse_options()
1149 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in parse_options()
1153 f2fs_err(sbi, in parse_options()
1159 if (is_compress_extension_exist(sbi, name, true)) { in parse_options()
1169 F2FS_OPTION(sbi).compress_ext_cnt++; in parse_options()
1173 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1174 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1181 noext = F2FS_OPTION(sbi).noextensions; in parse_options()
1182 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in parse_options()
1186 f2fs_err(sbi, in parse_options()
1192 if (is_compress_extension_exist(sbi, name, false)) { in parse_options()
1202 F2FS_OPTION(sbi).nocompress_ext_cnt++; in parse_options()
1206 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1207 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1210 F2FS_OPTION(sbi).compress_chksum = true; in parse_options()
1213 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1214 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1221 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in parse_options()
1223 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER; in parse_options()
1231 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1232 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1235 set_opt(sbi, COMPRESS_CACHE); in parse_options()
1245 f2fs_info(sbi, "compression options not supported"); in parse_options()
1249 set_opt(sbi, ATGC); in parse_options()
1252 set_opt(sbi, GC_MERGE); in parse_options()
1255 clear_opt(sbi, GC_MERGE); in parse_options()
1262 F2FS_OPTION(sbi).discard_unit = in parse_options()
1265 F2FS_OPTION(sbi).discard_unit = in parse_options()
1268 F2FS_OPTION(sbi).discard_unit = in parse_options()
1281 F2FS_OPTION(sbi).memory_mode = in parse_options()
1284 F2FS_OPTION(sbi).memory_mode = in parse_options()
1293 set_opt(sbi, AGE_EXTENT_CACHE); in parse_options()
1300 F2FS_OPTION(sbi).errors = in parse_options()
1303 F2FS_OPTION(sbi).errors = in parse_options()
1306 F2FS_OPTION(sbi).errors = in parse_options()
1315 set_opt(sbi, NAT_BITS); in parse_options()
1318 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", in parse_options()
1326 static int f2fs_default_check(struct f2fs_sb_info *sbi) in f2fs_default_check() argument
1329 if (f2fs_check_quota_options(sbi)) in f2fs_default_check()
1332 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { in f2fs_default_check()
1333 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in f2fs_default_check()
1336 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { in f2fs_default_check()
1337 …f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in f2fs_default_check()
1342 if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) { in f2fs_default_check()
1343 f2fs_err(sbi, in f2fs_default_check()
1353 if (f2fs_sb_has_blkzoned(sbi)) { in f2fs_default_check()
1355 if (F2FS_OPTION(sbi).discard_unit != in f2fs_default_check()
1357 …f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default… in f2fs_default_check()
1358 F2FS_OPTION(sbi).discard_unit = in f2fs_default_check()
1362 if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) { in f2fs_default_check()
1363 f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature"); in f2fs_default_check()
1367 f2fs_err(sbi, "Zoned block device support is not enabled"); in f2fs_default_check()
1373 if (f2fs_test_compress_extension(sbi)) { in f2fs_default_check()
1374 f2fs_err(sbi, "invalid compress or nocompress extension"); in f2fs_default_check()
1379 if (test_opt(sbi, INLINE_XATTR_SIZE)) { in f2fs_default_check()
1382 if (!f2fs_sb_has_extra_attr(sbi) || in f2fs_default_check()
1383 !f2fs_sb_has_flexible_inline_xattr(sbi)) { in f2fs_default_check()
1384 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); in f2fs_default_check()
1387 if (!test_opt(sbi, INLINE_XATTR)) { in f2fs_default_check()
1388 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); in f2fs_default_check()
1395 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || in f2fs_default_check()
1396 F2FS_OPTION(sbi).inline_xattr_size > max_size) { in f2fs_default_check()
1397 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", in f2fs_default_check()
1403 if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) { in f2fs_default_check()
1404 f2fs_err(sbi, "LFS is not compatible with ATGC"); in f2fs_default_check()
1408 if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) { in f2fs_default_check()
1409 f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode"); in f2fs_default_check()
1413 if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) { in f2fs_default_check()
1414 f2fs_err(sbi, "Allow to mount readonly mode only"); in f2fs_default_check()
1418 if (test_opt(sbi, NORECOVERY) && !f2fs_readonly(sbi->sb)) { in f2fs_default_check()
1419 f2fs_err(sbi, "norecovery requires readonly mount"); in f2fs_default_check()
1459 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_drop_inode() local
1466 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_drop_inode()
1467 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_drop_inode()
1468 inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_drop_inode()
1517 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_dirtied() local
1520 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1525 stat_inc_dirty_inode(sbi, DIRTY_META); in f2fs_inode_dirtied()
1529 &sbi->inode_list[DIRTY_META]); in f2fs_inode_dirtied()
1530 inc_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_dirtied()
1532 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_synced() local
1544 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1546 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1551 dec_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_synced()
1556 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1566 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dirty_inode() local
1568 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_dirty_inode()
1569 inode->i_ino == F2FS_META_INO(sbi)) in f2fs_dirty_inode()
1584 static void destroy_percpu_info(struct f2fs_sb_info *sbi) in destroy_percpu_info() argument
1586 percpu_counter_destroy(&sbi->total_valid_inode_count); in destroy_percpu_info()
1587 percpu_counter_destroy(&sbi->rf_node_block_count); in destroy_percpu_info()
1588 percpu_counter_destroy(&sbi->alloc_valid_block_count); in destroy_percpu_info()
1591 static void destroy_device_list(struct f2fs_sb_info *sbi) in destroy_device_list() argument
1595 for (i = 0; i < sbi->s_ndevs; i++) { in destroy_device_list()
1602 kvfree(sbi->devs); in destroy_device_list()
1607 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_put_super() local
1613 f2fs_unregister_sysfs(sbi); in f2fs_put_super()
1618 mutex_lock(&sbi->umount_mutex); in f2fs_put_super()
1624 f2fs_stop_ckpt_thread(sbi); in f2fs_put_super()
1631 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in f2fs_put_super()
1632 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) { in f2fs_put_super()
1636 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1637 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1641 done = f2fs_issue_discard_timeout(sbi); in f2fs_put_super()
1642 if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) { in f2fs_put_super()
1646 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1647 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1654 f2fs_release_ino_entry(sbi, true); in f2fs_put_super()
1656 f2fs_leave_shrinker(sbi); in f2fs_put_super()
1657 mutex_unlock(&sbi->umount_mutex); in f2fs_put_super()
1660 f2fs_flush_merged_writes(sbi); in f2fs_put_super()
1662 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in f2fs_put_super()
1664 if (err || f2fs_cp_error(sbi)) { in f2fs_put_super()
1665 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_put_super()
1666 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_put_super()
1670 if (!get_pages(sbi, i)) in f2fs_put_super()
1672 f2fs_err(sbi, "detect filesystem reference count leak during " in f2fs_put_super()
1673 "umount, type: %d, count: %lld", i, get_pages(sbi, i)); in f2fs_put_super()
1674 f2fs_bug_on(sbi, 1); in f2fs_put_super()
1677 f2fs_bug_on(sbi, sbi->fsync_node_num); in f2fs_put_super()
1679 f2fs_destroy_compress_inode(sbi); in f2fs_put_super()
1681 iput(sbi->node_inode); in f2fs_put_super()
1682 sbi->node_inode = NULL; in f2fs_put_super()
1684 iput(sbi->meta_inode); in f2fs_put_super()
1685 sbi->meta_inode = NULL; in f2fs_put_super()
1691 f2fs_destroy_stats(sbi); in f2fs_put_super()
1694 f2fs_destroy_node_manager(sbi); in f2fs_put_super()
1695 f2fs_destroy_segment_manager(sbi); in f2fs_put_super()
1697 /* flush s_error_work before sbi destroy */ in f2fs_put_super()
1698 flush_work(&sbi->s_error_work); in f2fs_put_super()
1700 f2fs_destroy_post_read_wq(sbi); in f2fs_put_super()
1702 kvfree(sbi->ckpt); in f2fs_put_super()
1704 kfree(sbi->raw_super); in f2fs_put_super()
1706 f2fs_destroy_page_array_cache(sbi); in f2fs_put_super()
1707 f2fs_destroy_xattr_caches(sbi); in f2fs_put_super()
1710 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_put_super()
1712 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_put_super()
1713 destroy_percpu_info(sbi); in f2fs_put_super()
1714 f2fs_destroy_iostat(sbi); in f2fs_put_super()
1716 kvfree(sbi->write_io[i]); in f2fs_put_super()
1724 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_sync_fs() local
1727 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_fs()
1729 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_sync_fs()
1734 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_sync_fs()
1738 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_sync_fs()
1739 err = f2fs_issue_checkpoint(sbi); in f2fs_sync_fs()
1747 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_freeze() local
1753 if (unlikely(f2fs_cp_error(sbi))) in f2fs_freeze()
1757 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY)) in f2fs_freeze()
1760 sbi->umount_lock_holder = current; in f2fs_freeze()
1763 f2fs_flush_ckpt_thread(sbi); in f2fs_freeze()
1765 sbi->umount_lock_holder = NULL; in f2fs_freeze()
1768 set_sbi_flag(sbi, SBI_IS_FREEZING); in f2fs_freeze()
1774 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_unfreeze() local
1783 if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi)) in f2fs_unfreeze()
1784 f2fs_issue_discard_timeout(sbi); in f2fs_unfreeze()
1838 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_statfs() local
1844 total_count = le64_to_cpu(sbi->raw_super->block_count); in f2fs_statfs()
1845 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); in f2fs_statfs()
1847 buf->f_bsize = sbi->blocksize; in f2fs_statfs()
1851 spin_lock(&sbi->stat_lock); in f2fs_statfs()
1852 if (sbi->carve_out) in f2fs_statfs()
1853 buf->f_blocks -= sbi->current_reserved_blocks; in f2fs_statfs()
1854 user_block_count = sbi->user_block_count; in f2fs_statfs()
1855 total_valid_node_count = valid_node_count(sbi); in f2fs_statfs()
1856 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_statfs()
1857 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - in f2fs_statfs()
1858 sbi->current_reserved_blocks; in f2fs_statfs()
1860 if (unlikely(buf->f_bfree <= sbi->unusable_block_count)) in f2fs_statfs()
1863 buf->f_bfree -= sbi->unusable_block_count; in f2fs_statfs()
1864 spin_unlock(&sbi->stat_lock); in f2fs_statfs()
1866 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) in f2fs_statfs()
1868 F2FS_OPTION(sbi).root_reserved_blocks; in f2fs_statfs()
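
The f2fs_statfs() lines show how free and available space are reported: f_bfree is the user block count minus valid user blocks and the currently reserved blocks (further reduced by unusable blocks when checkpointing is disabled), and f_bavail additionally subtracts the root reserve when enough free blocks remain. A worked example with made-up numbers:

    /* user_block_count        = 1,000,000
     * valid_user_blocks(sbi)  =   600,000
     * current_reserved_blocks =    10,000
     * unusable_block_count    =         0
     * root_reserved_blocks    =    50,000
     *
     * f_bfree  = 1,000,000 - 600,000 - 10,000 = 390,000 blocks
     * f_bavail =   390,000 -  50,000          = 340,000 blocks
     */
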
1897 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_quota_options() local
1899 if (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1902 switch (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1916 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_show_quota_options()
1918 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); in f2fs_show_quota_options()
1920 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_show_quota_options()
1922 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); in f2fs_show_quota_options()
1924 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_show_quota_options()
1926 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); in f2fs_show_quota_options()
1934 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_compress_options() local
1938 if (!f2fs_sb_has_compression(sbi)) in f2fs_show_compress_options()
1941 switch (F2FS_OPTION(sbi).compress_algorithm) { in f2fs_show_compress_options()
1957 if (F2FS_OPTION(sbi).compress_level) in f2fs_show_compress_options()
1958 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level); in f2fs_show_compress_options()
1961 F2FS_OPTION(sbi).compress_log_size); in f2fs_show_compress_options()
1963 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) { in f2fs_show_compress_options()
1965 F2FS_OPTION(sbi).extensions[i]); in f2fs_show_compress_options()
1968 for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) { in f2fs_show_compress_options()
1970 F2FS_OPTION(sbi).noextensions[i]); in f2fs_show_compress_options()
1973 if (F2FS_OPTION(sbi).compress_chksum) in f2fs_show_compress_options()
1976 if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS) in f2fs_show_compress_options()
1978 else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER) in f2fs_show_compress_options()
1981 if (test_opt(sbi, COMPRESS_CACHE)) in f2fs_show_compress_options()
1988 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); in f2fs_show_options() local
1990 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) in f2fs_show_options()
1992 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON) in f2fs_show_options()
1994 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) in f2fs_show_options()
1997 if (test_opt(sbi, GC_MERGE)) in f2fs_show_options()
2002 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_show_options()
2004 if (test_opt(sbi, NORECOVERY)) in f2fs_show_options()
2006 if (test_opt(sbi, DISCARD)) { in f2fs_show_options()
2008 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK) in f2fs_show_options()
2010 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) in f2fs_show_options()
2012 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) in f2fs_show_options()
2018 if (test_opt(sbi, XATTR_USER)) in f2fs_show_options()
2022 if (test_opt(sbi, INLINE_XATTR)) in f2fs_show_options()
2026 if (test_opt(sbi, INLINE_XATTR_SIZE)) in f2fs_show_options()
2028 F2FS_OPTION(sbi).inline_xattr_size); in f2fs_show_options()
2031 if (test_opt(sbi, POSIX_ACL)) in f2fs_show_options()
2036 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) in f2fs_show_options()
2038 if (test_opt(sbi, INLINE_DATA)) in f2fs_show_options()
2042 if (test_opt(sbi, INLINE_DENTRY)) in f2fs_show_options()
2046 if (test_opt(sbi, FLUSH_MERGE)) in f2fs_show_options()
2050 if (test_opt(sbi, NOBARRIER)) in f2fs_show_options()
2054 if (test_opt(sbi, FASTBOOT)) in f2fs_show_options()
2056 if (test_opt(sbi, READ_EXTENT_CACHE)) in f2fs_show_options()
2060 if (test_opt(sbi, AGE_EXTENT_CACHE)) in f2fs_show_options()
2062 if (test_opt(sbi, DATA_FLUSH)) in f2fs_show_options()
2066 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE) in f2fs_show_options()
2068 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS) in f2fs_show_options()
2070 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG) in f2fs_show_options()
2072 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in f2fs_show_options()
2074 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); in f2fs_show_options()
2075 if (test_opt(sbi, RESERVE_ROOT)) in f2fs_show_options()
2077 F2FS_OPTION(sbi).root_reserved_blocks, in f2fs_show_options()
2079 F2FS_OPTION(sbi).s_resuid), in f2fs_show_options()
2081 F2FS_OPTION(sbi).s_resgid)); in f2fs_show_options()
2083 if (test_opt(sbi, FAULT_INJECTION)) { in f2fs_show_options()
2085 F2FS_OPTION(sbi).fault_info.inject_rate); in f2fs_show_options()
2087 F2FS_OPTION(sbi).fault_info.inject_type); in f2fs_show_options()
2091 if (test_opt(sbi, QUOTA)) in f2fs_show_options()
2093 if (test_opt(sbi, USRQUOTA)) in f2fs_show_options()
2095 if (test_opt(sbi, GRPQUOTA)) in f2fs_show_options()
2097 if (test_opt(sbi, PRJQUOTA)) in f2fs_show_options()
2100 f2fs_show_quota_options(seq, sbi->sb); in f2fs_show_options()
2102 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); in f2fs_show_options()
2104 if (sbi->sb->s_flags & SB_INLINECRYPT) in f2fs_show_options()
2107 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) in f2fs_show_options()
2109 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in f2fs_show_options()
2112 if (test_opt(sbi, DISABLE_CHECKPOINT)) in f2fs_show_options()
2114 F2FS_OPTION(sbi).unusable_cap); in f2fs_show_options()
2115 if (test_opt(sbi, MERGE_CHECKPOINT)) in f2fs_show_options()
2119 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) in f2fs_show_options()
2121 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) in f2fs_show_options()
2123 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) in f2fs_show_options()
2127 f2fs_show_compress_options(seq, sbi->sb); in f2fs_show_options()
2130 if (test_opt(sbi, ATGC)) in f2fs_show_options()
2133 if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL) in f2fs_show_options()
2135 else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW) in f2fs_show_options()
2138 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) in f2fs_show_options()
2140 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE) in f2fs_show_options()
2142 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC) in f2fs_show_options()
2145 if (test_opt(sbi, NAT_BITS)) in f2fs_show_options()
2151 static void default_options(struct f2fs_sb_info *sbi, bool remount) in default_options() argument
2155 set_opt(sbi, READ_EXTENT_CACHE); in default_options()
2156 clear_opt(sbi, DISABLE_CHECKPOINT); in default_options()
2158 if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) in default_options()
2159 set_opt(sbi, DISCARD); in default_options()
2161 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2162 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION; in default_options()
2164 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK; in default_options()
2167 if (f2fs_sb_has_readonly(sbi)) in default_options()
2168 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE; in default_options()
2170 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE; in default_options()
2172 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; in default_options()
2173 if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <= in default_options()
2175 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in default_options()
2177 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in default_options()
2178 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in default_options()
2179 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); in default_options()
2180 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); in default_options()
2181 if (f2fs_sb_has_compression(sbi)) { in default_options()
2182 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; in default_options()
2183 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE; in default_options()
2184 F2FS_OPTION(sbi).compress_ext_cnt = 0; in default_options()
2185 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in default_options()
2187 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in default_options()
2188 F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL; in default_options()
2189 F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE; in default_options()
2191 set_opt(sbi, INLINE_XATTR); in default_options()
2192 set_opt(sbi, INLINE_DATA); in default_options()
2193 set_opt(sbi, INLINE_DENTRY); in default_options()
2194 set_opt(sbi, MERGE_CHECKPOINT); in default_options()
2195 set_opt(sbi, LAZYTIME); in default_options()
2196 F2FS_OPTION(sbi).unusable_cap = 0; in default_options()
2197 if (!f2fs_is_readonly(sbi)) in default_options()
2198 set_opt(sbi, FLUSH_MERGE); in default_options()
2199 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2200 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in default_options()
2202 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in default_options()
2205 set_opt(sbi, XATTR_USER); in default_options()
2208 set_opt(sbi, POSIX_ACL); in default_options()
2211 f2fs_build_fault_attr(sbi, 0, 0); in default_options()
2218 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_disable_checkpoint() argument
2220 unsigned int s_flags = sbi->sb->s_flags; in f2fs_disable_checkpoint()
2222 unsigned int gc_mode = sbi->gc_mode; in f2fs_disable_checkpoint()
2228 f2fs_err(sbi, "checkpoint=disable on readonly fs"); in f2fs_disable_checkpoint()
2231 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_disable_checkpoint()
2234 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2235 if (!f2fs_disable_cp_again(sbi, unusable)) in f2fs_disable_checkpoint()
2238 f2fs_update_time(sbi, DISABLE_TIME); in f2fs_disable_checkpoint()
2240 sbi->gc_mode = GC_URGENT_HIGH; in f2fs_disable_checkpoint()
2242 while (!f2fs_time_over(sbi, DISABLE_TIME)) { in f2fs_disable_checkpoint()
2251 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2252 stat_inc_gc_call_count(sbi, FOREGROUND); in f2fs_disable_checkpoint()
2253 err = f2fs_gc(sbi, &gc_control); in f2fs_disable_checkpoint()
2262 ret = sync_filesystem(sbi->sb); in f2fs_disable_checkpoint()
2268 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2269 if (f2fs_disable_cp_again(sbi, unusable)) { in f2fs_disable_checkpoint()
2275 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2277 set_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_disable_checkpoint()
2278 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_disable_checkpoint()
2279 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_disable_checkpoint()
2283 spin_lock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2284 sbi->unusable_block_count = unusable; in f2fs_disable_checkpoint()
2285 spin_unlock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2288 f2fs_up_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2290 sbi->gc_mode = gc_mode; in f2fs_disable_checkpoint()
2291 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_disable_checkpoint()
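
Read together, the f2fs_disable_checkpoint() lines trace the checkpoint=disable handshake. A compressed outline of that order, inferred only from the matched lines (error paths and the cpc setup are omitted):

    /* 1. reject a read-only superblock, then set SB_ACTIVE
     * 2. unusable = f2fs_get_unusable_blocks(sbi); skip the GC phase if
     *    f2fs_disable_cp_again() says the current count already fits
     * 3. sbi->gc_mode = GC_URGENT_HIGH; run f2fs_gc() under gc_lock until
     *    f2fs_time_over(sbi, DISABLE_TIME)
     * 4. sync_filesystem(sbi->sb); recompute unusable and fail if it is
     *    still over budget
     * 5. under gc_lock: set_sbi_flag(sbi, SBI_CP_DISABLED),
     *    f2fs_write_checkpoint(), record sbi->unusable_block_count
     * 6. restore the saved gc_mode and s_flags (SB_RDONLY status)
     */
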
2295 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_enable_checkpoint() argument
2301 sync_inodes_sb(sbi->sb); in f2fs_enable_checkpoint()
2303 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--); in f2fs_enable_checkpoint()
2306 f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); in f2fs_enable_checkpoint()
2308 f2fs_down_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2309 f2fs_dirty_to_prefree(sbi); in f2fs_enable_checkpoint()
2311 clear_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_enable_checkpoint()
2312 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_enable_checkpoint()
2313 f2fs_up_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2315 f2fs_sync_fs(sbi->sb, 1); in f2fs_enable_checkpoint()
2318 f2fs_flush_ckpt_thread(sbi); in f2fs_enable_checkpoint()
2323 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_remount() local
2331 bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE); in f2fs_remount()
2332 bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE); in f2fs_remount()
2333 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT); in f2fs_remount()
2334 bool no_atgc = !test_opt(sbi, ATGC); in f2fs_remount()
2335 bool no_discard = !test_opt(sbi, DISCARD); in f2fs_remount()
2336 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE); in f2fs_remount()
2337 bool block_unit_discard = f2fs_block_unit_discard(sbi); in f2fs_remount()
2338 bool no_nat_bits = !test_opt(sbi, NAT_BITS); in f2fs_remount()
2347 org_mount_opt = sbi->mount_opt; in f2fs_remount()
2350 sbi->umount_lock_holder = current; in f2fs_remount()
2353 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; in f2fs_remount()
2355 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_remount()
2357 kstrdup(F2FS_OPTION(sbi).s_qf_names[i], in f2fs_remount()
2371 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { in f2fs_remount()
2372 err = f2fs_commit_super(sbi, false); in f2fs_remount()
2373 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", in f2fs_remount()
2376 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_remount()
2379 default_options(sbi, true); in f2fs_remount()
2382 err = parse_options(sbi, data, true); in f2fs_remount()
2387 if (f2fs_sb_has_blkzoned(sbi) && in f2fs_remount()
2388 sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) { in f2fs_remount()
2389 f2fs_err(sbi, in f2fs_remount()
2391 sbi->max_open_zones, F2FS_OPTION(sbi).active_logs); in f2fs_remount()
2397 err = f2fs_default_check(sbi); in f2fs_remount()
2402 flush_work(&sbi->s_error_work); in f2fs_remount()
2411 if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) { in f2fs_remount()
2426 } else if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_remount()
2433 if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) { in f2fs_remount()
2435 f2fs_warn(sbi, "LFS is not compatible with IPU"); in f2fs_remount()
2440 if (no_atgc == !!test_opt(sbi, ATGC)) { in f2fs_remount()
2442 f2fs_warn(sbi, "switch atgc option is not allowed"); in f2fs_remount()
2447 if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) { in f2fs_remount()
2449 f2fs_warn(sbi, "switch extent_cache option is not allowed"); in f2fs_remount()
2453 if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) { in f2fs_remount()
2455 f2fs_warn(sbi, "switch age_extent_cache option is not allowed"); in f2fs_remount()
2459 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) { in f2fs_remount()
2461 f2fs_warn(sbi, "switch compress_cache option is not allowed"); in f2fs_remount()
2465 if (block_unit_discard != f2fs_block_unit_discard(sbi)) { in f2fs_remount()
2467 f2fs_warn(sbi, "switch discard_unit option is not allowed"); in f2fs_remount()
2471 if (no_nat_bits == !!test_opt(sbi, NAT_BITS)) { in f2fs_remount()
2473 f2fs_warn(sbi, "switch nat_bits option is not allowed"); in f2fs_remount()
2477 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2479 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); in f2fs_remount()
2489 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && in f2fs_remount()
2490 !test_opt(sbi, GC_MERGE))) { in f2fs_remount()
2491 if (sbi->gc_thread) { in f2fs_remount()
2492 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2495 } else if (!sbi->gc_thread) { in f2fs_remount()
2496 err = f2fs_start_gc_thread(sbi); in f2fs_remount()
2505 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_remount()
2506 set_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2508 clear_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2515 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { in f2fs_remount()
2516 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2517 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2520 err = f2fs_create_flush_cmd_control(sbi); in f2fs_remount()
2526 if (no_discard == !!test_opt(sbi, DISCARD)) { in f2fs_remount()
2527 if (test_opt(sbi, DISCARD)) { in f2fs_remount()
2528 err = f2fs_start_discard_thread(sbi); in f2fs_remount()
2533 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2534 f2fs_issue_discard_timeout(sbi); in f2fs_remount()
2539 adjust_unusable_cap_perc(sbi); in f2fs_remount()
2540 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2541 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2542 err = f2fs_disable_checkpoint(sbi); in f2fs_remount()
2547 f2fs_enable_checkpoint(sbi); in f2fs_remount()
2557 if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || in f2fs_remount()
2558 !test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_remount()
2559 f2fs_stop_ckpt_thread(sbi); in f2fs_remount()
2562 f2fs_flush_ckpt_thread(sbi); in f2fs_remount()
2564 err = f2fs_start_ckpt_thread(sbi); in f2fs_remount()
2566 f2fs_err(sbi, in f2fs_remount()
2581 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_remount()
2583 limit_reserve_root(sbi); in f2fs_remount()
2586 sbi->umount_lock_holder = NULL; in f2fs_remount()
2590 f2fs_enable_checkpoint(sbi); in f2fs_remount()
2592 if (f2fs_disable_checkpoint(sbi)) in f2fs_remount()
2593 f2fs_warn(sbi, "checkpoint has not been disabled"); in f2fs_remount()
2597 if (f2fs_start_discard_thread(sbi)) in f2fs_remount()
2598 f2fs_warn(sbi, "discard has been stopped"); in f2fs_remount()
2600 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2604 if (f2fs_create_flush_cmd_control(sbi)) in f2fs_remount()
2605 f2fs_warn(sbi, "background flush thread has stopped"); in f2fs_remount()
2607 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2608 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2612 if (f2fs_start_gc_thread(sbi)) in f2fs_remount()
2613 f2fs_warn(sbi, "background gc thread has stopped"); in f2fs_remount()
2615 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2619 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; in f2fs_remount()
2621 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_remount()
2622 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; in f2fs_remount()
2625 sbi->mount_opt = org_mount_opt; in f2fs_remount()
2628 sbi->umount_lock_holder = NULL; in f2fs_remount()
2638 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi) in f2fs_need_recovery() argument
2641 if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_need_recovery()
2644 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_need_recovery()
2646 if (test_opt(sbi, NORECOVERY)) in f2fs_need_recovery()
2648 return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG); in f2fs_need_recovery()
2651 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi) in f2fs_recover_quota_begin() argument
2653 bool readonly = f2fs_readonly(sbi->sb); in f2fs_recover_quota_begin()
2655 if (!f2fs_need_recovery(sbi)) in f2fs_recover_quota_begin()
2659 if (f2fs_hw_is_readonly(sbi)) in f2fs_recover_quota_begin()
2663 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_quota_begin()
2664 set_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_begin()
2671 return f2fs_enable_quota_files(sbi, readonly); in f2fs_recover_quota_begin()
2674 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi, in f2fs_recover_quota_end() argument
2678 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_quota_end()
2680 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) { in f2fs_recover_quota_end()
2681 clear_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_end()
2682 sbi->sb->s_flags |= SB_RDONLY; in f2fs_recover_quota_end()
2806 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) in f2fs_quota_on_mount() argument
2808 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { in f2fs_quota_on_mount()
2809 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); in f2fs_quota_on_mount()
2813 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], in f2fs_quota_on_mount()
2814 F2FS_OPTION(sbi).s_jquota_fmt, type); in f2fs_quota_on_mount()
2817 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) in f2fs_enable_quota_files() argument
2822 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { in f2fs_enable_quota_files()
2823 err = f2fs_enable_quotas(sbi->sb); in f2fs_enable_quota_files()
2825 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); in f2fs_enable_quota_files()
2832 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_enable_quota_files()
2833 err = f2fs_quota_on_mount(sbi, i); in f2fs_enable_quota_files()
2838 f2fs_err(sbi, "Cannot turn on quotas: %d on %d", in f2fs_enable_quota_files()
2882 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_enable_quotas() local
2886 test_opt(sbi, USRQUOTA), in f2fs_enable_quotas()
2887 test_opt(sbi, GRPQUOTA), in f2fs_enable_quotas()
2888 test_opt(sbi, PRJQUOTA), in f2fs_enable_quotas()
2892 f2fs_err(sbi, "quota file may be corrupted, skip loading it"); in f2fs_enable_quotas()
2905 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", in f2fs_enable_quotas()
2918 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) in f2fs_quota_sync_file() argument
2920 struct quota_info *dqopt = sb_dqopt(sbi->sb); in f2fs_quota_sync_file()
2924 ret = dquot_writeback_dquots(sbi->sb, type); in f2fs_quota_sync_file()
2933 if (is_journalled_quota(sbi)) in f2fs_quota_sync_file()
2941 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_sync_file()
2947 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_do_quota_sync() local
2964 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_do_quota_sync()
2976 f2fs_lock_op(sbi); in f2fs_do_quota_sync()
2977 f2fs_down_read(&sbi->quota_sem); in f2fs_do_quota_sync()
2979 ret = f2fs_quota_sync_file(sbi, cnt); in f2fs_do_quota_sync()
2981 f2fs_up_read(&sbi->quota_sem); in f2fs_do_quota_sync()
2982 f2fs_unlock_op(sbi); in f2fs_do_quota_sync()
2984 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_do_quota_sync()
3076 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_off() local
3088 if (is_journalled_quota(sbi)) in f2fs_quota_off()
3089 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_off()
3133 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_commit() local
3136 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); in f2fs_dquot_commit()
3139 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit()
3140 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_commit()
3146 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_acquire() local
3149 f2fs_down_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3152 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_acquire()
3153 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3159 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_release() local
3163 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_release()
3170 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_mark_dquot_dirty() local
3174 if (is_journalled_quota(sbi)) in f2fs_dquot_mark_dquot_dirty()
3175 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in f2fs_dquot_mark_dquot_dirty()
3182 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_commit_info() local
3186 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit_info()
3268 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_set_context() local
3276 if (f2fs_sb_has_lost_found(sbi) && in f2fs_set_context()
3277 inode->i_ino == F2FS_ROOT_INO(sbi)) in f2fs_set_context()
3298 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_get_devices() local
3302 if (!f2fs_is_multi_device(sbi)) in f2fs_get_devices()
3305 devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL); in f2fs_get_devices()
3309 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_devices()
3311 *num_devs = sbi->s_ndevs; in f2fs_get_devices()
3332 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_nfs_get_inode() local
3335 if (f2fs_check_nid_range(sbi, ino)) in f2fs_nfs_get_inode()
3414 static int __f2fs_commit_super(struct f2fs_sb_info *sbi, struct folio *folio, in __f2fs_commit_super() argument
3425 memcpy(F2FS_SUPER_BLOCK(folio, index), F2FS_RAW_SUPER(sbi), in __f2fs_commit_super()
3432 bio = bio_alloc(sbi->sb->s_bdev, 1, opf, GFP_NOFS); in __f2fs_commit_super()
3438 f2fs_bug_on(sbi, 1); in __f2fs_commit_super()
3446 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, in sanity_check_area_boundary() argument
3450 struct super_block *sb = sbi->sb; in sanity_check_area_boundary()
3470 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", in sanity_check_area_boundary()
3477 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3485 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3493 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3501 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3508 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3520 if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) { in sanity_check_area_boundary()
3521 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in sanity_check_area_boundary()
3524 err = __f2fs_commit_super(sbi, folio, index, false); in sanity_check_area_boundary()
3527 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3536 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, in sanity_check_raw_super() argument
3546 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", in sanity_check_raw_super()
3556 f2fs_info(sbi, "Invalid SB checksum offset: %zu", in sanity_check_raw_super()
3561 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { in sanity_check_raw_super()
3562 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); in sanity_check_raw_super()
3569 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", in sanity_check_raw_super()
3577 f2fs_info(sbi, "Invalid log blocks per segment (%u)", in sanity_check_raw_super()
3587 f2fs_info(sbi, "Invalid log sectorsize (%u)", in sanity_check_raw_super()
3594 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", in sanity_check_raw_super()
3611 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); in sanity_check_raw_super()
3617 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", in sanity_check_raw_super()
3623 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)", in sanity_check_raw_super()
3629 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", in sanity_check_raw_super()
3635 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", in sanity_check_raw_super()
3649 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)", in sanity_check_raw_super()
3655 !bdev_is_zoned(sbi->sb->s_bdev)) { in sanity_check_raw_super()
3656 f2fs_info(sbi, "Zoned block device path is missing"); in sanity_check_raw_super()
3662 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", in sanity_check_raw_super()
3670 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", in sanity_check_raw_super()
3680 f2fs_info(sbi, "Insane cp_payload (%u >= %u)", in sanity_check_raw_super()
3691 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", in sanity_check_raw_super()
3699 if (sanity_check_area_boundary(sbi, folio, index)) in sanity_check_raw_super()
3705 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) in f2fs_sanity_check_ckpt() argument
3708 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_sanity_check_ckpt()
3709 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_sanity_check_ckpt()
3737 if (!f2fs_sb_has_readonly(sbi) && in f2fs_sanity_check_ckpt()
3740 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); in f2fs_sanity_check_ckpt()
3745 (f2fs_sb_has_readonly(sbi) ? 1 : 0); in f2fs_sanity_check_ckpt()
3749 f2fs_err(sbi, "Wrong user_block_count: %u", in f2fs_sanity_check_ckpt()
3756 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", in f2fs_sanity_check_ckpt()
3762 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_sanity_check_ckpt()
3764 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", in f2fs_sanity_check_ckpt()
3770 blocks_per_seg = BLKS_PER_SEG(sbi); in f2fs_sanity_check_ckpt()
3777 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3783 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3796 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3802 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3813 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3826 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u", in f2fs_sanity_check_ckpt()
3831 cp_pack_start_sum = __start_sum_addr(sbi); in f2fs_sanity_check_ckpt()
3832 cp_payload = __cp_payload(sbi); in f2fs_sanity_check_ckpt()
3836 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", in f2fs_sanity_check_ckpt()
3843 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, " in f2fs_sanity_check_ckpt()
3856 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)", in f2fs_sanity_check_ckpt()
3861 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_sanity_check_ckpt()
3862 f2fs_err(sbi, "A bug case: need to run fsck"); in f2fs_sanity_check_ckpt()
3868 static void init_sb_info(struct f2fs_sb_info *sbi) in init_sb_info() argument
3870 struct f2fs_super_block *raw_super = sbi->raw_super; in init_sb_info()
3873 sbi->log_sectors_per_block = in init_sb_info()
3875 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); in init_sb_info()
3876 sbi->blocksize = BIT(sbi->log_blocksize); in init_sb_info()
3877 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); in init_sb_info()
3878 sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg); in init_sb_info()
3879 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); in init_sb_info()
3880 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); in init_sb_info()
3881 sbi->total_sections = le32_to_cpu(raw_super->section_count); in init_sb_info()
3882 sbi->total_node_count = SEGS_TO_BLKS(sbi, in init_sb_info()
3885 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino); in init_sb_info()
3886 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino); in init_sb_info()
3887 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino); in init_sb_info()
3888 sbi->cur_victim_sec = NULL_SECNO; in init_sb_info()
3889 sbi->gc_mode = GC_NORMAL; in init_sb_info()
3890 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in init_sb_info()
3891 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in init_sb_info()
3892 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; in init_sb_info()
3893 sbi->migration_granularity = SEGS_PER_SEC(sbi); in init_sb_info()
3894 sbi->migration_window_granularity = f2fs_sb_has_blkzoned(sbi) ? in init_sb_info()
3895 DEF_MIGRATION_WINDOW_GRANULARITY_ZONED : SEGS_PER_SEC(sbi); in init_sb_info()
3896 sbi->seq_file_ra_mul = MIN_RA_MUL; in init_sb_info()
3897 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE; in init_sb_info()
3898 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE; in init_sb_info()
3899 spin_lock_init(&sbi->gc_remaining_trials_lock); in init_sb_info()
3900 atomic64_set(&sbi->current_atomic_write, 0); in init_sb_info()
3902 sbi->dir_level = DEF_DIR_LEVEL; in init_sb_info()
3903 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; in init_sb_info()
3904 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3905 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3906 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3907 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; in init_sb_info()
3908 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = in init_sb_info()
3910 clear_sbi_flag(sbi, SBI_NEED_FSCK); in init_sb_info()
3913 atomic_set(&sbi->nr_pages[i], 0); in init_sb_info()
3916 atomic_set(&sbi->wb_sync_req[i], 0); in init_sb_info()
3918 INIT_LIST_HEAD(&sbi->s_list); in init_sb_info()
3919 mutex_init(&sbi->umount_mutex); in init_sb_info()
3920 init_f2fs_rwsem(&sbi->io_order_lock); in init_sb_info()
3921 spin_lock_init(&sbi->cp_lock); in init_sb_info()
3923 sbi->dirty_device = 0; in init_sb_info()
3924 spin_lock_init(&sbi->dev_lock); in init_sb_info()
3926 init_f2fs_rwsem(&sbi->sb_lock); in init_sb_info()
3927 init_f2fs_rwsem(&sbi->pin_sem); in init_sb_info()
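Most of init_sb_info() expands the log2-encoded geometry of the raw superblock into usable counts with BIT(), then initialises defaults, locks and counters. A small sketch of the log2-to-count conversion, using a hypothetical struct:

/*
 * Sketch of the expansion done with BIT(log_*) above; illustrative only.
 */
struct geometry {
        unsigned int log_blocksize;
        unsigned int log_blocks_per_seg;
        unsigned int blocksize;         /* bytes per block */
        unsigned int blocks_per_seg;    /* blocks per segment */
};

static void derive_geometry(struct geometry *g)
{
        g->blocksize = 1U << g->log_blocksize;           /* e.g. 1 << 12 = 4096 */
        g->blocks_per_seg = 1U << g->log_blocks_per_seg; /* e.g. 1 << 9  = 512  */
}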
3930 static int init_percpu_info(struct f2fs_sb_info *sbi) in init_percpu_info() argument
3934 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); in init_percpu_info()
3938 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL); in init_percpu_info()
3942 err = percpu_counter_init(&sbi->total_valid_inode_count, 0, in init_percpu_info()
3949 percpu_counter_destroy(&sbi->rf_node_block_count); in init_percpu_info()
3951 percpu_counter_destroy(&sbi->alloc_valid_block_count); in init_percpu_info()
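init_percpu_info() sets up three per-CPU counters and, on failure, destroys the ones already initialised in reverse order. A minimal sketch of that init-then-unwind pattern around percpu_counter_init(); the struct and label names are illustrative.

/*
 * Sketch of the init/unwind pattern; not the f2fs code itself.
 */
struct counters {
        struct percpu_counter a;
        struct percpu_counter b;
};

static int counters_init(struct counters *c)
{
        int err;

        err = percpu_counter_init(&c->a, 0, GFP_KERNEL);
        if (err)
                return err;

        err = percpu_counter_init(&c->b, 0, GFP_KERNEL);
        if (err)
                goto destroy_a;         /* unwind in reverse order */

        return 0;

destroy_a:
        percpu_counter_destroy(&c->a);
        return err;
}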
3958 struct f2fs_sb_info *sbi; member
3973 if (!rz_args->sbi->unusable_blocks_per_sec) { in f2fs_report_zone_cb()
3974 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks; in f2fs_report_zone_cb()
3977 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) { in f2fs_report_zone_cb()
3978 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n"); in f2fs_report_zone_cb()
3984 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) in init_blkz_info() argument
3993 if (!f2fs_sb_has_blkzoned(sbi)) in init_blkz_info()
3998 if (max_open_zones && (max_open_zones < sbi->max_open_zones)) in init_blkz_info()
3999 sbi->max_open_zones = max_open_zones; in init_blkz_info()
4000 if (sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) { in init_blkz_info()
4001 f2fs_err(sbi, in init_blkz_info()
4003 sbi->max_open_zones, F2FS_OPTION(sbi).active_logs); in init_blkz_info()
4009 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != in init_blkz_info()
4012 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors); in init_blkz_info()
4014 sbi->blocks_per_blkz); in init_blkz_info()
4018 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, in init_blkz_info()
4025 rep_zone_arg.sbi = sbi; in init_blkz_info()
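f2fs_report_zone_cb() records the unusable capacity of the first sequential zone and rejects devices whose zones differ, which is what the "single zone capacity" error above reports. A simplified sketch of such a report-zones callback follows; it assumes the blk_zone capacity field and the blkdev_report_zones() callback convention, and it omits the conventional-zone handling and sector-to-block conversion of the real code.

/*
 * Simplified callback in the style of f2fs_report_zone_cb(); the
 * argument struct is illustrative.
 */
struct zone_cap_arg {
        sector_t unusable_sectors;
        bool seen;
};

static int zone_cap_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
        struct zone_cap_arg *arg = data;
        sector_t unusable = zone->len - zone->capacity;

        if (!arg->seen) {
                arg->unusable_sectors = unusable;
                arg->seen = true;
                return 0;
        }
        /* a non-zero return stops the zone report early */
        return arg->unusable_sectors == unusable ? 0 : -EINVAL;
}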
4042 static int read_raw_super_block(struct f2fs_sb_info *sbi, in read_raw_super_block() argument
4046 struct super_block *sb = sbi->sb; in read_raw_super_block()
4059 f2fs_err(sbi, "Unable to read %dth superblock", in read_raw_super_block()
4067 err = sanity_check_raw_super(sbi, folio, block); in read_raw_super_block()
4069 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", in read_raw_super_block()
4094 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) in f2fs_commit_super() argument
4101 if ((recover && f2fs_readonly(sbi->sb)) || in f2fs_commit_super()
4102 f2fs_hw_is_readonly(sbi)) { in f2fs_commit_super()
4103 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_commit_super()
4108 if (!recover && f2fs_sb_has_sb_chksum(sbi)) { in f2fs_commit_super()
4109 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi), in f2fs_commit_super()
4111 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); in f2fs_commit_super()
4115 index = sbi->valid_super_block ? 0 : 1; in f2fs_commit_super()
4116 folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL); in f2fs_commit_super()
4119 err = __f2fs_commit_super(sbi, folio, index, true); in f2fs_commit_super()
4127 index = sbi->valid_super_block; in f2fs_commit_super()
4128 folio = read_mapping_folio(sbi->sb->s_bdev->bd_mapping, index, NULL); in f2fs_commit_super()
4131 err = __f2fs_commit_super(sbi, folio, index, true); in f2fs_commit_super()
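The index arithmetic above shows the write ordering in f2fs_commit_super(): the copy that was not mounted from (valid_super_block ? 0 : 1) is refreshed before the one in use. A sketch of that ordering; write_super_copy() is a hypothetical stand-in for the unlisted __f2fs_commit_super() path.

/*
 * Sketch of the spare-copy-first ordering; helper is hypothetical.
 */
static int write_super_copy(int index) { return 0; }    /* placeholder */

static int commit_both_supers(int valid_super_block)
{
        int err;

        /* refresh the spare copy first */
        err = write_super_copy(valid_super_block ? 0 : 1);
        if (err)
                return err;

        /* then the superblock we are actually running on */
        return write_super_copy(valid_super_block);
}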
4136 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason) in save_stop_reason() argument
4140 spin_lock_irqsave(&sbi->error_lock, flags); in save_stop_reason()
4141 if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0)) in save_stop_reason()
4142 sbi->stop_reason[reason]++; in save_stop_reason()
4143 spin_unlock_irqrestore(&sbi->error_lock, flags); in save_stop_reason()
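save_stop_reason() bumps a per-reason byte counter under the error spinlock and saturates it at GENMASK(BITS_PER_BYTE - 1, 0), i.e. 255, so the on-disk field cannot wrap. A hedged sketch of that saturating increment, with illustrative parameters:

/*
 * Sketch of the saturating stop-reason counter; not the kernel code.
 */
static void bump_stop_reason(spinlock_t *lock, u8 *counters,
                             unsigned char reason)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (counters[reason] < U8_MAX)  /* saturate rather than wrap */
                counters[reason]++;
        spin_unlock_irqrestore(lock, flags);
}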
4146 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi) in f2fs_record_stop_reason() argument
4148 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_record_stop_reason()
4152 f2fs_down_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4154 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4155 if (sbi->error_dirty) { in f2fs_record_stop_reason()
4156 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_record_stop_reason()
4158 sbi->error_dirty = false; in f2fs_record_stop_reason()
4160 memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON); in f2fs_record_stop_reason()
4161 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4163 err = f2fs_commit_super(sbi, false); in f2fs_record_stop_reason()
4165 f2fs_up_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4167 f2fs_err_ratelimited(sbi, in f2fs_record_stop_reason()
4172 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) in f2fs_save_errors() argument
4176 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_save_errors()
4177 if (!test_bit(flag, (unsigned long *)sbi->errors)) { in f2fs_save_errors()
4178 set_bit(flag, (unsigned long *)sbi->errors); in f2fs_save_errors()
4179 sbi->error_dirty = true; in f2fs_save_errors()
4181 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_save_errors()
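f2fs_save_errors() sets a bit in the in-memory error array and marks the state dirty only when the bit actually changed, so the superblock is rewritten at most once per new error. A sketch of that record-once update, with illustrative parameters:

/*
 * Sketch of the bitmap update in the style of f2fs_save_errors().
 */
static void save_error_bit(spinlock_t *lock, unsigned long *bitmap,
                           bool *dirty, unsigned int flag)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (!test_bit(flag, bitmap)) {
                set_bit(flag, bitmap);
                *dirty = true;  /* on-disk superblock is now stale */
        }
        spin_unlock_irqrestore(lock, flags);
}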
4184 static bool f2fs_update_errors(struct f2fs_sb_info *sbi) in f2fs_update_errors() argument
4189 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_update_errors()
4190 if (sbi->error_dirty) { in f2fs_update_errors()
4191 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_update_errors()
4193 sbi->error_dirty = false; in f2fs_update_errors()
4196 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_update_errors()
4201 static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_record_errors() argument
4205 f2fs_down_write(&sbi->sb_lock); in f2fs_record_errors()
4207 if (!f2fs_update_errors(sbi)) in f2fs_record_errors()
4210 err = f2fs_commit_super(sbi, false); in f2fs_record_errors()
4212 f2fs_err_ratelimited(sbi, in f2fs_record_errors()
4216 f2fs_up_write(&sbi->sb_lock); in f2fs_record_errors()
4219 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error() argument
4221 f2fs_save_errors(sbi, error); in f2fs_handle_error()
4222 f2fs_record_errors(sbi, error); in f2fs_handle_error()
4225 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error_async() argument
4227 f2fs_save_errors(sbi, error); in f2fs_handle_error_async()
4229 if (!sbi->error_dirty) in f2fs_handle_error_async()
4231 if (!test_bit(error, (unsigned long *)sbi->errors)) in f2fs_handle_error_async()
4233 schedule_work(&sbi->s_error_work); in f2fs_handle_error_async()
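f2fs_handle_error() records the error and commits the superblock synchronously, while f2fs_handle_error_async() only saves the bit and defers the write to s_error_work, which keeps it usable from contexts that must not block on I/O. A sketch of the deferred half, with an illustrative context struct:

/*
 * Sketch of deferring the superblock error record to a workqueue, in the
 * style of f2fs_handle_error_async()/f2fs_record_error_work().
 */
struct err_ctx {
        struct work_struct work;
        /* ... error bitmap, spinlock, dirty flag ... */
};

static void err_work_fn(struct work_struct *work)
{
        struct err_ctx *ctx = container_of(work, struct err_ctx, work);

        /* here: take the sb lock and write the recorded errors back */
        (void)ctx;
}

/*
 * setup:       INIT_WORK(&ctx->work, err_work_fn);
 * async path:  schedule_work(&ctx->work);   usable from atomic context
 */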
4242 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason) in f2fs_handle_critical_error() argument
4244 struct super_block *sb = sbi->sb; in f2fs_handle_critical_error()
4247 F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE; in f2fs_handle_critical_error()
4249 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_handle_critical_error()
4251 if (!f2fs_hw_is_readonly(sbi)) { in f2fs_handle_critical_error()
4252 save_stop_reason(sbi, reason); in f2fs_handle_critical_error()
4259 schedule_work(&sbi->s_error_work); in f2fs_handle_critical_error()
4267 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC && in f2fs_handle_critical_error()
4269 !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) in f2fs_handle_critical_error()
4274 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); in f2fs_handle_critical_error()
4284 f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason); in f2fs_handle_critical_error()
4288 f2fs_warn(sbi, "Remounting filesystem read-only"); in f2fs_handle_critical_error()
4301 struct f2fs_sb_info *sbi = container_of(work, in f2fs_record_error_work() local
4304 f2fs_record_stop_reason(sbi); in f2fs_record_error_work()
4307 static inline unsigned int get_first_zoned_segno(struct f2fs_sb_info *sbi) in get_first_zoned_segno() argument
4311 for (devi = 0; devi < sbi->s_ndevs; devi++) in get_first_zoned_segno()
4313 return GET_SEGNO(sbi, FDEV(devi).start_blk); in get_first_zoned_segno()
4317 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) in f2fs_scan_devices() argument
4319 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_scan_devices()
4322 blk_mode_t mode = sb_open_mode(sbi->sb->s_flags); in f2fs_scan_devices()
4327 if (!bdev_is_zoned(sbi->sb->s_bdev)) in f2fs_scan_devices()
4336 sbi->devs = f2fs_kzalloc(sbi, in f2fs_scan_devices()
4340 if (!sbi->devs) in f2fs_scan_devices()
4343 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev); in f2fs_scan_devices()
4344 sbi->aligned_blksize = true; in f2fs_scan_devices()
4346 sbi->max_open_zones = UINT_MAX; in f2fs_scan_devices()
4347 sbi->blkzone_alloc_policy = BLKZONE_ALLOC_PRIOR_SEQ; in f2fs_scan_devices()
4352 FDEV(0).bdev_file = sbi->sb->s_bdev_file; in f2fs_scan_devices()
4364 SEGS_TO_BLKS(sbi, in f2fs_scan_devices()
4370 SEGS_TO_BLKS(sbi, in f2fs_scan_devices()
4373 FDEV(i).path, mode, sbi->sb, NULL); in f2fs_scan_devices()
4381 sbi->s_ndevs = i + 1; in f2fs_scan_devices()
4384 sbi->aligned_blksize = false; in f2fs_scan_devices()
4388 if (!f2fs_sb_has_blkzoned(sbi)) { in f2fs_scan_devices()
4389 f2fs_err(sbi, "Zoned block device feature not enabled"); in f2fs_scan_devices()
4392 if (init_blkz_info(sbi, i)) { in f2fs_scan_devices()
4393 f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); in f2fs_scan_devices()
4398 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: Host-managed)", in f2fs_scan_devices()
4405 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", in f2fs_scan_devices()
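f2fs_scan_devices() records, per device, the contiguous block range it backs (FDEV(i).start_blk appears above; a matching end block is assumed here) so later I/O can be routed to the right block device. A sketch of that range lookup; the struct and the end_blk field are assumptions made for illustration.

/*
 * Sketch of routing a filesystem block address to its backing device.
 */
struct dev_range {
        unsigned int start_blk;
        unsigned int end_blk;   /* inclusive */
};

static int lookup_device(const struct dev_range *devs, int ndevs,
                         unsigned int blkaddr)
{
        int i;

        for (i = 0; i < ndevs; i++)
                if (blkaddr >= devs[i].start_blk &&
                    blkaddr <= devs[i].end_blk)
                        return i;
        return -1;      /* address not covered by any device */
}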
4413 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) in f2fs_setup_casefold() argument
4416 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { in f2fs_setup_casefold()
4421 encoding_info = f2fs_sb_read_encoding(sbi->raw_super); in f2fs_setup_casefold()
4423 f2fs_err(sbi, in f2fs_setup_casefold()
4428 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags); in f2fs_setup_casefold()
4431 f2fs_err(sbi, in f2fs_setup_casefold()
4441 f2fs_info(sbi, "Using encoding defined by superblock: " in f2fs_setup_casefold()
4448 sbi->sb->s_encoding = encoding; in f2fs_setup_casefold()
4449 sbi->sb->s_encoding_flags = encoding_flags; in f2fs_setup_casefold()
4452 if (f2fs_sb_has_casefold(sbi)) { in f2fs_setup_casefold()
4453 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); in f2fs_setup_casefold()
4460 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) in f2fs_tuning_parameters() argument
4463 if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) { in f2fs_tuning_parameters()
4464 if (f2fs_block_unit_discard(sbi)) in f2fs_tuning_parameters()
4465 SM_I(sbi)->dcc_info->discard_granularity = in f2fs_tuning_parameters()
4467 if (!f2fs_lfs_mode(sbi)) in f2fs_tuning_parameters()
4468 SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) | in f2fs_tuning_parameters()
4472 sbi->readdir_ra = true; in f2fs_tuning_parameters()
4477 struct f2fs_sb_info *sbi; in f2fs_fill_super() local
4497 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); in f2fs_fill_super()
4498 if (!sbi) in f2fs_fill_super()
4501 sbi->sb = sb; in f2fs_fill_super()
4504 init_f2fs_rwsem(&sbi->gc_lock); in f2fs_fill_super()
4505 mutex_init(&sbi->writepages); in f2fs_fill_super()
4506 init_f2fs_rwsem(&sbi->cp_global_sem); in f2fs_fill_super()
4507 init_f2fs_rwsem(&sbi->node_write); in f2fs_fill_super()
4508 init_f2fs_rwsem(&sbi->node_change); in f2fs_fill_super()
4509 spin_lock_init(&sbi->stat_lock); in f2fs_fill_super()
4510 init_f2fs_rwsem(&sbi->cp_rwsem); in f2fs_fill_super()
4511 init_f2fs_rwsem(&sbi->quota_sem); in f2fs_fill_super()
4512 init_waitqueue_head(&sbi->cp_wait); in f2fs_fill_super()
4513 spin_lock_init(&sbi->error_lock); in f2fs_fill_super()
4516 INIT_LIST_HEAD(&sbi->inode_list[i]); in f2fs_fill_super()
4517 spin_lock_init(&sbi->inode_lock[i]); in f2fs_fill_super()
4519 mutex_init(&sbi->flush_lock); in f2fs_fill_super()
4523 f2fs_err(sbi, "unable to set blocksize"); in f2fs_fill_super()
4527 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, in f2fs_fill_super()
4532 sb->s_fs_info = sbi; in f2fs_fill_super()
4533 sbi->raw_super = raw_super; in f2fs_fill_super()
4535 INIT_WORK(&sbi->s_error_work, f2fs_record_error_work); in f2fs_fill_super()
4536 memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS); in f2fs_fill_super()
4537 memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON); in f2fs_fill_super()
4540 if (f2fs_sb_has_inode_chksum(sbi)) in f2fs_fill_super()
4541 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, in f2fs_fill_super()
4544 default_options(sbi, false); in f2fs_fill_super()
4552 err = parse_options(sbi, options, false); in f2fs_fill_super()
4556 err = f2fs_default_check(sbi); in f2fs_fill_super()
4564 err = f2fs_setup_casefold(sbi); in f2fs_fill_super()
4573 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_fill_super()
4575 if (f2fs_qf_ino(sbi->sb, i)) in f2fs_fill_super()
4576 sbi->nquota_files++; in f2fs_fill_super()
4593 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_fill_super()
4594 if (test_opt(sbi, INLINECRYPT)) in f2fs_fill_super()
4597 if (test_opt(sbi, LAZYTIME)) in f2fs_fill_super()
4607 sbi->valid_super_block = valid_super_block; in f2fs_fill_super()
4610 set_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4612 err = f2fs_init_write_merge_io(sbi); in f2fs_fill_super()
4616 init_sb_info(sbi); in f2fs_fill_super()
4618 err = f2fs_init_iostat(sbi); in f2fs_fill_super()
4622 err = init_percpu_info(sbi); in f2fs_fill_super()
4626 /* init per sbi slab cache */ in f2fs_fill_super()
4627 err = f2fs_init_xattr_caches(sbi); in f2fs_fill_super()
4630 err = f2fs_init_page_array_cache(sbi); in f2fs_fill_super()
4635 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); in f2fs_fill_super()
4636 if (IS_ERR(sbi->meta_inode)) { in f2fs_fill_super()
4637 f2fs_err(sbi, "Failed to read F2FS meta data inode"); in f2fs_fill_super()
4638 err = PTR_ERR(sbi->meta_inode); in f2fs_fill_super()
4642 err = f2fs_get_valid_checkpoint(sbi); in f2fs_fill_super()
4644 f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); in f2fs_fill_super()
4648 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG)) in f2fs_fill_super()
4649 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_fill_super()
4650 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) { in f2fs_fill_super()
4651 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4652 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; in f2fs_fill_super()
4655 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG)) in f2fs_fill_super()
4656 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4659 err = f2fs_scan_devices(sbi); in f2fs_fill_super()
4661 f2fs_err(sbi, "Failed to find devices"); in f2fs_fill_super()
4665 err = f2fs_init_post_read_wq(sbi); in f2fs_fill_super()
4667 f2fs_err(sbi, "Failed to initialize post read workqueue"); in f2fs_fill_super()
4671 sbi->total_valid_node_count = in f2fs_fill_super()
4672 le32_to_cpu(sbi->ckpt->valid_node_count); in f2fs_fill_super()
4673 percpu_counter_set(&sbi->total_valid_inode_count, in f2fs_fill_super()
4674 le32_to_cpu(sbi->ckpt->valid_inode_count)); in f2fs_fill_super()
4675 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); in f2fs_fill_super()
4676 sbi->total_valid_block_count = in f2fs_fill_super()
4677 le64_to_cpu(sbi->ckpt->valid_block_count); in f2fs_fill_super()
4678 sbi->last_valid_block_count = sbi->total_valid_block_count; in f2fs_fill_super()
4679 sbi->reserved_blocks = 0; in f2fs_fill_super()
4680 sbi->current_reserved_blocks = 0; in f2fs_fill_super()
4681 limit_reserve_root(sbi); in f2fs_fill_super()
4682 adjust_unusable_cap_perc(sbi); in f2fs_fill_super()
4684 f2fs_init_extent_cache_info(sbi); in f2fs_fill_super()
4686 f2fs_init_ino_entry_info(sbi); in f2fs_fill_super()
4688 f2fs_init_fsync_node_info(sbi); in f2fs_fill_super()
4691 f2fs_init_ckpt_req_control(sbi); in f2fs_fill_super()
4692 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) && in f2fs_fill_super()
4693 test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_fill_super()
4694 err = f2fs_start_ckpt_thread(sbi); in f2fs_fill_super()
4696 f2fs_err(sbi, in f2fs_fill_super()
4704 err = f2fs_build_segment_manager(sbi); in f2fs_fill_super()
4706 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", in f2fs_fill_super()
4710 err = f2fs_build_node_manager(sbi); in f2fs_fill_super()
4712 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", in f2fs_fill_super()
4718 sbi->sectors_written_start = f2fs_get_sectors_written(sbi); in f2fs_fill_super()
4721 sbi->first_zoned_segno = get_first_zoned_segno(sbi); in f2fs_fill_super()
4724 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in f2fs_fill_super()
4725 if (__exist_node_summaries(sbi)) in f2fs_fill_super()
4726 sbi->kbytes_written = in f2fs_fill_super()
4729 f2fs_build_gc_manager(sbi); in f2fs_fill_super()
4731 err = f2fs_build_stats(sbi); in f2fs_fill_super()
4736 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); in f2fs_fill_super()
4737 if (IS_ERR(sbi->node_inode)) { in f2fs_fill_super()
4738 f2fs_err(sbi, "Failed to read node inode"); in f2fs_fill_super()
4739 err = PTR_ERR(sbi->node_inode); in f2fs_fill_super()
4744 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); in f2fs_fill_super()
4746 f2fs_err(sbi, "Failed to read root inode"); in f2fs_fill_super()
4764 err = f2fs_init_compress_inode(sbi); in f2fs_fill_super()
4768 err = f2fs_register_sysfs(sbi); in f2fs_fill_super()
4772 sbi->umount_lock_holder = current; in f2fs_fill_super()
4775 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4778 f2fs_err(sbi, "Cannot turn on quotas: error %d", err); in f2fs_fill_super()
4781 quota_enabled = f2fs_recover_quota_begin(sbi); in f2fs_fill_super()
4784 err = f2fs_recover_orphan_inodes(sbi); in f2fs_fill_super()
4788 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) { in f2fs_fill_super()
4794 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && in f2fs_fill_super()
4795 !test_opt(sbi, NORECOVERY)) { in f2fs_fill_super()
4800 if (f2fs_hw_is_readonly(sbi)) { in f2fs_fill_super()
4801 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in f2fs_fill_super()
4802 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4805 f2fs_err(sbi, "Need to recover fsync data, but " in f2fs_fill_super()
4812 f2fs_info(sbi, "write access unavailable, skipping recovery"); in f2fs_fill_super()
4817 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4822 err = f2fs_recover_fsync_data(sbi, false); in f2fs_fill_super()
4827 f2fs_err(sbi, "Cannot recover all fsync data errno=%d", in f2fs_fill_super()
4832 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4836 f2fs_err(sbi, "Need to recover fsync data"); in f2fs_fill_super()
4843 f2fs_recover_quota_end(sbi, quota_enabled); in f2fs_fill_super()
4853 err = f2fs_check_and_fix_write_pointer(sbi); in f2fs_fill_super()
4858 clear_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4860 err = f2fs_init_inmem_curseg(sbi); in f2fs_fill_super()
4864 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_fill_super()
4865 err = f2fs_disable_checkpoint(sbi); in f2fs_fill_super()
4868 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { in f2fs_fill_super()
4869 f2fs_enable_checkpoint(sbi); in f2fs_fill_super()
4876 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || in f2fs_fill_super()
4877 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4879 err = f2fs_start_gc_thread(sbi); in f2fs_fill_super()
4887 err = f2fs_commit_super(sbi, true); in f2fs_fill_super()
4888 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", in f2fs_fill_super()
4889 sbi->valid_super_block ? 1 : 2, err); in f2fs_fill_super()
4892 f2fs_join_shrinker(sbi); in f2fs_fill_super()
4894 f2fs_tuning_parameters(sbi); in f2fs_fill_super()
4896 f2fs_notice(sbi, "Mounted with checkpoint version = %llx", in f2fs_fill_super()
4897 cur_cp_version(F2FS_CKPT(sbi))); in f2fs_fill_super()
4898 f2fs_update_time(sbi, CP_TIME); in f2fs_fill_super()
4899 f2fs_update_time(sbi, REQ_TIME); in f2fs_fill_super()
4900 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4902 sbi->umount_lock_holder = NULL; in f2fs_fill_super()
4907 sync_filesystem(sbi->sb); in f2fs_fill_super()
4913 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) in f2fs_fill_super()
4914 f2fs_quota_off_umount(sbi->sb); in f2fs_fill_super()
4922 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_fill_super()
4925 f2fs_unregister_sysfs(sbi); in f2fs_fill_super()
4927 f2fs_destroy_compress_inode(sbi); in f2fs_fill_super()
4932 f2fs_release_ino_entry(sbi, true); in f2fs_fill_super()
4933 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_fill_super()
4934 iput(sbi->node_inode); in f2fs_fill_super()
4935 sbi->node_inode = NULL; in f2fs_fill_super()
4937 f2fs_destroy_stats(sbi); in f2fs_fill_super()
4940 f2fs_stop_discard_thread(sbi); in f2fs_fill_super()
4941 f2fs_destroy_node_manager(sbi); in f2fs_fill_super()
4943 f2fs_destroy_segment_manager(sbi); in f2fs_fill_super()
4945 f2fs_stop_ckpt_thread(sbi); in f2fs_fill_super()
4946 /* flush s_error_work before sbi destroy */ in f2fs_fill_super()
4947 flush_work(&sbi->s_error_work); in f2fs_fill_super()
4948 f2fs_destroy_post_read_wq(sbi); in f2fs_fill_super()
4950 destroy_device_list(sbi); in f2fs_fill_super()
4951 kvfree(sbi->ckpt); in f2fs_fill_super()
4953 make_bad_inode(sbi->meta_inode); in f2fs_fill_super()
4954 iput(sbi->meta_inode); in f2fs_fill_super()
4955 sbi->meta_inode = NULL; in f2fs_fill_super()
4957 f2fs_destroy_page_array_cache(sbi); in f2fs_fill_super()
4959 f2fs_destroy_xattr_caches(sbi); in f2fs_fill_super()
4961 destroy_percpu_info(sbi); in f2fs_fill_super()
4963 f2fs_destroy_iostat(sbi); in f2fs_fill_super()
4966 kvfree(sbi->write_io[i]); in f2fs_fill_super()
4975 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_fill_super()
4977 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_fill_super()
4982 kfree(sbi); in f2fs_fill_super()
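The error paths at the end of f2fs_fill_super() above tear down, in reverse order, exactly what had been set up before the failing step. A generic sketch of that goto-label unwinding style; every helper here is hypothetical.

/*
 * Sketch of goto-based error unwinding: later labels fall through to
 * earlier ones, undoing setup in reverse order.
 */
static int setup_a(void) { return 0; }          /* hypothetical steps */
static int setup_b(void) { return 0; }
static int setup_c(void) { return 0; }
static void teardown_b(void) { }
static void teardown_a(void) { }

static int setup_all(void)
{
        int err;

        err = setup_a();
        if (err)
                return err;
        err = setup_b();
        if (err)
                goto free_a;
        err = setup_c();
        if (err)
                goto free_b;
        return 0;

free_b:
        teardown_b();
free_a:
        teardown_a();
        return err;
}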
5002 struct f2fs_sb_info *sbi = F2FS_SB(sb); in kill_f2fs_super() local
5005 sbi->umount_lock_holder = current; in kill_f2fs_super()
5007 set_sbi_flag(sbi, SBI_IS_CLOSE); in kill_f2fs_super()
5008 f2fs_stop_gc_thread(sbi); in kill_f2fs_super()
5009 f2fs_stop_discard_thread(sbi); in kill_f2fs_super()
5016 if (test_opt(sbi, COMPRESS_CACHE)) in kill_f2fs_super()
5017 truncate_inode_pages_final(COMPRESS_MAPPING(sbi)); in kill_f2fs_super()
5020 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in kill_f2fs_super()
5021 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in kill_f2fs_super()
5025 stat_inc_cp_call_count(sbi, TOTAL_CALL); in kill_f2fs_super()
5026 f2fs_write_checkpoint(sbi, &cpc); in kill_f2fs_super()
5029 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb)) in kill_f2fs_super()
5034 if (sbi) { in kill_f2fs_super()
5035 destroy_device_list(sbi); in kill_f2fs_super()
5036 kfree(sbi); in kill_f2fs_super()