Lines matching full:if in fs/f2fs/file.c
44 if (!ret) in f2fs_filemap_fault()
62 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_vm_page_mkwrite()
67 if (!f2fs_is_checkpoint_ready(sbi)) { in f2fs_vm_page_mkwrite()
73 if (f2fs_compressed_file(inode)) { in f2fs_vm_page_mkwrite()
76 if (ret < 0) { in f2fs_vm_page_mkwrite()
79 } else if (ret) { in f2fs_vm_page_mkwrite()
80 if (ret < F2FS_I(inode)->i_cluster_size) { in f2fs_vm_page_mkwrite()
89 if (need_alloc) in f2fs_vm_page_mkwrite()
99 if (unlikely(page->mapping != inode->i_mapping || in f2fs_vm_page_mkwrite()
107 if (need_alloc) { in f2fs_vm_page_mkwrite()
117 if (!need_alloc) { in f2fs_vm_page_mkwrite()
123 if (err) { in f2fs_vm_page_mkwrite()
134 * check to see if the page is mapped already (no holes) in f2fs_vm_page_mkwrite()
136 if (PageMappedToDisk(page)) in f2fs_vm_page_mkwrite()
140 if (((loff_t)(page->index + 1) << PAGE_SHIFT) > in f2fs_vm_page_mkwrite()
148 if (!PageUptodate(page)) in f2fs_vm_page_mkwrite()
178 if (!dentry) in get_parent_ino()
191 if (!S_ISREG(inode->i_mode)) in need_do_checkpoint()
193 else if (f2fs_compressed_file(inode)) in need_do_checkpoint()
195 else if (inode->i_nlink != 1) in need_do_checkpoint()
197 else if (is_sbi_flag_set(sbi, SBI_NEED_CP)) in need_do_checkpoint()
199 else if (file_wrong_pino(inode)) in need_do_checkpoint()
201 else if (!f2fs_space_for_roll_forward(sbi)) in need_do_checkpoint()
203 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) in need_do_checkpoint()
205 else if (test_opt(sbi, FASTBOOT)) in need_do_checkpoint()
207 else if (F2FS_OPTION(sbi).active_logs == 2) in need_do_checkpoint()
209 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && in need_do_checkpoint()
223 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino)) in need_inode_page_update()
235 if (file_wrong_pino(inode) && inode->i_nlink == 1 && in try_to_fix_pino()
258 if (unlikely(f2fs_readonly(inode->i_sb) || in f2fs_do_sync_file()
264 if (S_ISDIR(inode->i_mode)) in f2fs_do_sync_file()
267 /* if fdatasync is triggered, let's do in-place-update */ in f2fs_do_sync_file()
268 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) in f2fs_do_sync_file()
273 if (ret) { in f2fs_do_sync_file()
278 /* if the inode is dirty, let's recover all the time */ in f2fs_do_sync_file()
279 if (!f2fs_skip_inode_update(inode, datasync)) { in f2fs_do_sync_file()
285 * if there is no written data, don't waste time to write recovery info. in f2fs_do_sync_file()
287 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) && in f2fs_do_sync_file()
291 if (need_inode_page_update(sbi, ino)) in f2fs_do_sync_file()
294 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) || in f2fs_do_sync_file()
308 if (cp_reason) { in f2fs_do_sync_file()
325 if (ret) in f2fs_do_sync_file()
328 /* if cp_error was enabled, we should avoid infinite loop */ in f2fs_do_sync_file()
329 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_do_sync_file()
334 if (f2fs_need_inode_block_update(sbi, ino)) { in f2fs_do_sync_file()
341 * If it's atomic_write, it's just fine to keep write ordering. So in f2fs_do_sync_file()
343 * node chain which serializes node blocks. If one of node writes are in f2fs_do_sync_file()
348 if (!atomic) { in f2fs_do_sync_file()
350 if (ret) in f2fs_do_sync_file()
358 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) in f2fs_do_sync_file()
360 if (!ret) { in f2fs_do_sync_file()
374 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) in f2fs_sync_file()
384 if (__is_valid_data_blkaddr(blkaddr)) in __found_offset()
386 if (blkaddr == NEW_ADDR && in __found_offset()
391 if (blkaddr == NULL_ADDR) in __found_offset()
411 if (offset >= isize) in f2fs_seek_block()
415 if (f2fs_has_inline_data(inode) && whence == SEEK_HOLE) { in f2fs_seek_block()
425 if (err && err != -ENOENT) { in f2fs_seek_block()
427 } else if (err == -ENOENT) { in f2fs_seek_block()
429 if (whence == SEEK_DATA) { in f2fs_seek_block()
447 if (__is_valid_data_blkaddr(blkaddr) && in f2fs_seek_block()
454 if (__found_offset(file->f_mapping, blkaddr, in f2fs_seek_block()
463 if (whence == SEEK_DATA) in f2fs_seek_block()
466 if (whence == SEEK_HOLE && data_ofs > isize) in f2fs_seek_block()
488 if (offset < 0) in f2fs_llseek()
501 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_file_mmap()
504 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_mmap()
509 if (err) in f2fs_file_mmap()
522 if (err) in f2fs_file_open()
525 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_open()
529 if (err) in f2fs_file_open()
549 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) in f2fs_truncate_data_blocks_range()
559 if (f2fs_compressed_file(dn->inode) && in f2fs_truncate_data_blocks_range()
561 if (compressed_cluster) in f2fs_truncate_data_blocks_range()
568 if (blkaddr == NULL_ADDR) in f2fs_truncate_data_blocks_range()
574 if (__is_valid_data_blkaddr(blkaddr)) { in f2fs_truncate_data_blocks_range()
575 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, in f2fs_truncate_data_blocks_range()
578 if (compressed_cluster) in f2fs_truncate_data_blocks_range()
582 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page)) in f2fs_truncate_data_blocks_range()
587 if (!released || blkaddr != COMPRESS_ADDR) in f2fs_truncate_data_blocks_range()
591 if (compressed_cluster) in f2fs_truncate_data_blocks_range()
594 if (nr_free) { in f2fs_truncate_data_blocks_range()
625 if (!offset && !cache_only) in truncate_partial_data_page()
628 if (cache_only) { in truncate_partial_data_page()
630 if (page && PageUptodate(page)) in truncate_partial_data_page()
637 if (IS_ERR(page)) in truncate_partial_data_page()
645 if (!cache_only) in truncate_partial_data_page()
664 if (free_from >= sbi->max_file_blocks) in f2fs_do_truncate_blocks()
667 if (lock) in f2fs_do_truncate_blocks()
671 if (IS_ERR(ipage)) { in f2fs_do_truncate_blocks()
676 if (f2fs_has_inline_data(inode)) { in f2fs_do_truncate_blocks()
685 if (err) { in f2fs_do_truncate_blocks()
686 if (err == -ENOENT) in f2fs_do_truncate_blocks()
696 if (dn.ofs_in_node || IS_INODE(dn.node_page)) { in f2fs_do_truncate_blocks()
705 if (lock) in f2fs_do_truncate_blocks()
709 if (!err) in f2fs_do_truncate_blocks()
726 if (f2fs_compressed_file(inode)) in f2fs_truncate_blocks()
732 if (err) in f2fs_truncate_blocks()
736 if (from != free_from) { in f2fs_truncate_blocks()
738 if (err) in f2fs_truncate_blocks()
750 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_truncate()
753 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in f2fs_truncate()
759 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) { in f2fs_truncate()
765 if (!f2fs_may_inline_data(inode)) { in f2fs_truncate()
767 if (err) in f2fs_truncate()
772 if (err) in f2fs_truncate()
788 if (f2fs_has_extra_attr(inode) && in f2fs_getattr()
797 if (flags & F2FS_COMPR_FL) in f2fs_getattr()
799 if (flags & F2FS_APPEND_FL) in f2fs_getattr()
801 if (IS_ENCRYPTED(inode)) in f2fs_getattr()
803 if (flags & F2FS_IMMUTABLE_FL) in f2fs_getattr()
805 if (flags & F2FS_NODUMP_FL) in f2fs_getattr()
807 if (IS_VERITY(inode)) in f2fs_getattr()
820 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || in f2fs_getattr()
832 if (ia_valid & ATTR_UID) in __setattr_copy()
834 if (ia_valid & ATTR_GID) in __setattr_copy()
836 if (ia_valid & ATTR_ATIME) in __setattr_copy()
838 if (ia_valid & ATTR_MTIME) in __setattr_copy()
840 if (ia_valid & ATTR_CTIME) in __setattr_copy()
842 if (ia_valid & ATTR_MODE) { in __setattr_copy()
845 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) in __setattr_copy()
859 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_setattr()
862 if ((attr->ia_valid & ATTR_SIZE) && in f2fs_setattr()
867 if (err) in f2fs_setattr()
871 if (err) in f2fs_setattr()
875 if (err) in f2fs_setattr()
878 if (is_quota_modification(inode, attr)) { in f2fs_setattr()
880 if (err) in f2fs_setattr()
883 if ((attr->ia_valid & ATTR_UID && in f2fs_setattr()
889 if (err) { in f2fs_setattr()
899 if (attr->ia_valid & ATTR_UID) in f2fs_setattr()
901 if (attr->ia_valid & ATTR_GID) in f2fs_setattr()
907 if (attr->ia_valid & ATTR_SIZE) { in f2fs_setattr()
910 if (attr->ia_size > MAX_INLINE_DATA(inode)) { in f2fs_setattr()
916 if (err) in f2fs_setattr()
925 if (attr->ia_size <= old_size) in f2fs_setattr()
928 * do not trim all blocks after i_size if target size is in f2fs_setattr()
933 if (err) in f2fs_setattr()
944 if (attr->ia_valid & ATTR_MODE) { in f2fs_setattr()
946 if (err || is_inode_flag_set(inode, FI_ACL_MODE)) { in f2fs_setattr()
976 if (!len) in fill_zero()
985 if (IS_ERR(page)) in fill_zero()
1005 if (err) { in f2fs_truncate_hole()
1006 if (err == -ENOENT) { in f2fs_truncate_hole()
1034 if (ret) in punch_hole()
1043 if (pg_start == pg_end) { in punch_hole()
1046 if (ret) in punch_hole()
1049 if (off_start) { in punch_hole()
1052 if (ret) in punch_hole()
1055 if (off_end) { in punch_hole()
1057 if (ret) in punch_hole()
1061 if (pg_start < pg_end) { in punch_hole()
1099 if (ret && ret != -ENOENT) { in __read_out_blkaddrs()
1101 } else if (ret == -ENOENT) { in __read_out_blkaddrs()
1102 if (dn.max_level == 0) in __read_out_blkaddrs()
1116 if (__is_valid_data_blkaddr(*blkaddr) && in __read_out_blkaddrs()
1123 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { in __read_out_blkaddrs()
1125 if (f2fs_lfs_mode(sbi)) { in __read_out_blkaddrs()
1139 if (len) in __read_out_blkaddrs()
1152 if (*do_replace == 0) in __roll_back_blkaddrs()
1157 if (ret) { in __roll_back_blkaddrs()
1177 if (blkaddr[i] == NULL_ADDR && !full) { in __clone_blkaddrs()
1182 if (do_replace[i] || blkaddr[i] == NULL_ADDR) { in __clone_blkaddrs()
1190 if (ret) in __clone_blkaddrs()
1194 if (ret) { in __clone_blkaddrs()
1206 if (do_replace[i]) { in __clone_blkaddrs()
1219 if (dst_inode->i_size < new_size) in __clone_blkaddrs()
1229 if (IS_ERR(psrc)) in __clone_blkaddrs()
1233 if (IS_ERR(pdst)) { in __clone_blkaddrs()
1244 if (ret) in __clone_blkaddrs()
1267 if (!src_blkaddr) in __exchange_data_block()
1273 if (!do_replace) { in __exchange_data_block()
1280 if (ret) in __exchange_data_block()
1285 if (ret) in __exchange_data_block()
1334 if (offset + len >= i_size_read(inode)) in f2fs_collapse_range()
1338 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) in f2fs_collapse_range()
1342 if (ret) in f2fs_collapse_range()
1347 if (ret) in f2fs_collapse_range()
1351 if (ret) in f2fs_collapse_range()
1354 /* write out all moved pages, if possible */ in f2fs_collapse_range()
1362 if (!ret) in f2fs_collapse_range()
1377 if (f2fs_data_blkaddr(dn) == NULL_ADDR) in f2fs_do_zero_range()
1383 if (ret) in f2fs_do_zero_range()
1393 if (dn->data_blkaddr == NULL_ADDR) { in f2fs_do_zero_range()
1397 if (dn->data_blkaddr != NEW_ADDR) { in f2fs_do_zero_range()
1420 if (ret) in f2fs_zero_range()
1424 if (ret) in f2fs_zero_range()
1428 if (ret) in f2fs_zero_range()
1437 if (pg_start == pg_end) { in f2fs_zero_range()
1440 if (ret) in f2fs_zero_range()
1445 if (off_start) { in f2fs_zero_range()
1448 if (ret) in f2fs_zero_range()
1471 if (ret) { in f2fs_zero_range()
1490 if (ret) in f2fs_zero_range()
1498 if (off_end) { in f2fs_zero_range()
1500 if (ret) in f2fs_zero_range()
1508 if (new_size > i_size_read(inode)) { in f2fs_zero_range()
1509 if (mode & FALLOC_FL_KEEP_SIZE) in f2fs_zero_range()
1526 if (ret) in f2fs_insert_range()
1529 if (offset >= i_size_read(inode)) in f2fs_insert_range()
1533 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) in f2fs_insert_range()
1537 if (ret) in f2fs_insert_range()
1545 if (ret) in f2fs_insert_range()
1550 if (ret) in f2fs_insert_range()
1565 if (nr > delta) in f2fs_insert_range()
1579 /* write out all moved pages, if possible */ in f2fs_insert_range()
1585 if (!ret) in f2fs_insert_range()
1603 if (err) in expand_inode_data()
1607 if (err) in expand_inode_data()
1617 if (off_end) in expand_inode_data()
1620 if (!map.m_len) in expand_inode_data()
1623 if (f2fs_is_pinned_file(inode)) { in expand_inode_data()
1628 if (map.m_len % sbi->blocks_per_seg) in expand_inode_data()
1633 if (has_not_enough_free_secs(sbi, 0, in expand_inode_data()
1637 if (err && err != -ENODATA && err != -EAGAIN) in expand_inode_data()
1655 if (!err && len) in expand_inode_data()
1663 if (err) { in expand_inode_data()
1666 if (!map.m_len) in expand_inode_data()
1678 if (new_size > i_size_read(inode)) { in expand_inode_data()
1679 if (mode & FALLOC_FL_KEEP_SIZE) in expand_inode_data()
1694 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) in f2fs_fallocate()
1696 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) in f2fs_fallocate()
1698 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_fallocate()
1702 if (!S_ISREG(inode->i_mode)) in f2fs_fallocate()
1705 if (IS_ENCRYPTED(inode) && in f2fs_fallocate()
1709 if (f2fs_compressed_file(inode) && in f2fs_fallocate()
1714 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | in f2fs_fallocate()
1721 if (mode & FALLOC_FL_PUNCH_HOLE) { in f2fs_fallocate()
1722 if (offset >= inode->i_size) in f2fs_fallocate()
1726 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { in f2fs_fallocate()
1728 } else if (mode & FALLOC_FL_ZERO_RANGE) { in f2fs_fallocate()
1730 } else if (mode & FALLOC_FL_INSERT_RANGE) { in f2fs_fallocate()
1736 if (!ret) { in f2fs_fallocate()
1755 if (!(filp->f_mode & FMODE_WRITE) || in f2fs_release_file()
1760 if (f2fs_is_atomic_file(inode)) in f2fs_release_file()
1762 if (f2fs_is_volatile_file(inode)) { in f2fs_release_file()
1777 * If the process doing a transaction is crashed, we should do in f2fs_file_flush()
1782 if (f2fs_is_atomic_file(inode) && in f2fs_file_flush()
1796 if (IS_NOQUOTA(inode)) in f2fs_setflags_common()
1799 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { in f2fs_setflags_common()
1800 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) in f2fs_setflags_common()
1802 if (!f2fs_empty_dir(inode)) in f2fs_setflags_common()
1806 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) { in f2fs_setflags_common()
1807 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_setflags_common()
1809 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL)) in f2fs_setflags_common()
1813 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { in f2fs_setflags_common()
1814 if (masked_flags & F2FS_COMPR_FL) { in f2fs_setflags_common()
1815 if (!f2fs_disable_compressed_file(inode)) in f2fs_setflags_common()
1818 if (iflags & F2FS_NOCOMP_FL) in f2fs_setflags_common()
1820 if (iflags & F2FS_COMPR_FL) { in f2fs_setflags_common()
1821 if (!f2fs_may_compress(inode)) in f2fs_setflags_common()
1823 if (S_ISREG(inode->i_mode) && inode->i_size) in f2fs_setflags_common()
1829 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) { in f2fs_setflags_common()
1830 if (masked_flags & F2FS_COMPR_FL) in f2fs_setflags_common()
1838 if (fi->i_flags & F2FS_PROJINHERIT_FL) in f2fs_setflags_common()
1911 if (iflags & f2fs_fsflags_map[i].iflag) in f2fs_iflags_to_fsflags()
1924 if (fsflags & f2fs_fsflags_map[i].fsflag) in f2fs_fsflags_to_iflags()
1936 if (IS_ENCRYPTED(inode)) in f2fs_ioc_getflags()
1938 if (IS_VERITY(inode)) in f2fs_ioc_getflags()
1940 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) in f2fs_ioc_getflags()
1942 if (is_inode_flag_set(inode, FI_PIN_FILE)) in f2fs_ioc_getflags()
1958 if (!inode_owner_or_capable(inode)) in f2fs_ioc_setflags()
1961 if (get_user(fsflags, (int __user *)arg)) in f2fs_ioc_setflags()
1964 if (fsflags & ~F2FS_GETTABLE_FS_FL) in f2fs_ioc_setflags()
1969 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) in f2fs_ioc_setflags()
1973 if (ret) in f2fs_ioc_setflags()
1980 if (ret) in f2fs_ioc_setflags()
2005 if (!inode_owner_or_capable(inode)) in f2fs_ioc_start_atomic_write()
2008 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_start_atomic_write()
2011 if (filp->f_flags & O_DIRECT) in f2fs_ioc_start_atomic_write()
2015 if (ret) in f2fs_ioc_start_atomic_write()
2022 if (f2fs_is_atomic_file(inode)) { in f2fs_ioc_start_atomic_write()
2023 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) in f2fs_ioc_start_atomic_write()
2029 if (ret) in f2fs_ioc_start_atomic_write()
2038 if (get_dirty_pages(inode)) in f2fs_ioc_start_atomic_write()
2042 if (ret) { in f2fs_ioc_start_atomic_write()
2048 if (list_empty(&fi->inmem_ilist)) in f2fs_ioc_start_atomic_write()
2072 if (!inode_owner_or_capable(inode)) in f2fs_ioc_commit_atomic_write()
2076 if (ret) in f2fs_ioc_commit_atomic_write()
2083 if (f2fs_is_volatile_file(inode)) { in f2fs_ioc_commit_atomic_write()
2088 if (f2fs_is_atomic_file(inode)) { in f2fs_ioc_commit_atomic_write()
2090 if (ret) in f2fs_ioc_commit_atomic_write()
2094 if (!ret) in f2fs_ioc_commit_atomic_write()
2100 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) { in f2fs_ioc_commit_atomic_write()
2114 if (!inode_owner_or_capable(inode)) in f2fs_ioc_start_volatile_write()
2117 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_start_volatile_write()
2121 if (ret) in f2fs_ioc_start_volatile_write()
2126 if (f2fs_is_volatile_file(inode)) in f2fs_ioc_start_volatile_write()
2130 if (ret) in f2fs_ioc_start_volatile_write()
2149 if (!inode_owner_or_capable(inode)) in f2fs_ioc_release_volatile_write()
2153 if (ret) in f2fs_ioc_release_volatile_write()
2158 if (!f2fs_is_volatile_file(inode)) in f2fs_ioc_release_volatile_write()
2161 if (!f2fs_is_first_block_written(inode)) { in f2fs_ioc_release_volatile_write()
2178 if (!inode_owner_or_capable(inode)) in f2fs_ioc_abort_volatile_write()
2182 if (ret) in f2fs_ioc_abort_volatile_write()
2187 if (f2fs_is_atomic_file(inode)) in f2fs_ioc_abort_volatile_write()
2189 if (f2fs_is_volatile_file(inode)) { in f2fs_ioc_abort_volatile_write()
2212 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_shutdown()
2215 if (get_user(in, (__u32 __user *)arg)) in f2fs_ioc_shutdown()
2218 if (in != F2FS_GOING_DOWN_FULLSYNC) { in f2fs_ioc_shutdown()
2220 if (ret) { in f2fs_ioc_shutdown()
2221 if (ret == -EROFS) { in f2fs_ioc_shutdown()
2234 if (IS_ERR(sb)) { in f2fs_ioc_shutdown()
2238 if (sb) { in f2fs_ioc_shutdown()
2247 if (ret) in f2fs_ioc_shutdown()
2281 if (in != F2FS_GOING_DOWN_FULLSYNC) in f2fs_ioc_shutdown()
2297 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_fitrim()
2300 if (!f2fs_hw_support_discard(F2FS_SB(sb))) in f2fs_ioc_fitrim()
2303 if (copy_from_user(&range, (struct fstrim_range __user *)arg, in f2fs_ioc_fitrim()
2308 if (ret) in f2fs_ioc_fitrim()
2315 if (ret < 0) in f2fs_ioc_fitrim()
2318 if (copy_to_user((struct fstrim_range __user *)arg, &range, in f2fs_ioc_fitrim()
2330 if (u[i]) in uuid_is_nonzero()
2339 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) in f2fs_ioc_set_encryption_policy()
2349 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_get_encryption_policy()
2360 if (!f2fs_sb_has_encrypt(sbi)) in f2fs_ioc_get_encryption_pwsalt()
2364 if (err) in f2fs_ioc_get_encryption_pwsalt()
2369 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) in f2fs_ioc_get_encryption_pwsalt()
2376 if (err) { in f2fs_ioc_get_encryption_pwsalt()
2382 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt, in f2fs_ioc_get_encryption_pwsalt()
2394 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_get_encryption_policy_ex()
2402 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_add_encryption_key()
2410 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_remove_encryption_key()
2419 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_remove_encryption_key_all_users()
2428 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_get_encryption_key_status()
2436 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_get_encryption_nonce()
2449 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_gc()
2452 if (get_user(sync, (__u32 __user *)arg)) in f2fs_ioc_gc()
2455 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_gc()
2459 if (ret) in f2fs_ioc_gc()
2462 if (!sync) { in f2fs_ioc_gc()
2463 if (!down_write_trylock(&sbi->gc_lock)) { in f2fs_ioc_gc()
2485 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_gc_range()
2488 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg, in f2fs_ioc_gc_range()
2492 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_gc_range()
2496 if (end < range.start || range.start < MAIN_BLKADDR(sbi) || in f2fs_ioc_gc_range()
2501 if (ret) in f2fs_ioc_gc_range()
2505 if (!range.sync) { in f2fs_ioc_gc_range()
2506 if (!down_write_trylock(&sbi->gc_lock)) { in f2fs_ioc_gc_range()
2515 if (ret) { in f2fs_ioc_gc_range()
2516 if (ret == -EBUSY) in f2fs_ioc_gc_range()
2521 if (range.start <= end) in f2fs_ioc_gc_range()
2534 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_write_checkpoint()
2537 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_write_checkpoint()
2540 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_ioc_write_checkpoint()
2546 if (ret) in f2fs_ioc_write_checkpoint()
2571 /* if in-place-update policy is enabled, don't waste time here */ in f2fs_defragment_range()
2572 if (f2fs_should_update_inplace(inode, NULL)) in f2fs_defragment_range()
2585 if (err) in f2fs_defragment_range()
2589 * lookup mapping info in extent cache, skip defragmenting if physical in f2fs_defragment_range()
2592 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) { in f2fs_defragment_range()
2593 if (ei.fofs + ei.len >= pg_end) in f2fs_defragment_range()
2601 * lookup mapping info in dnode page cache, skip defragmenting if all in f2fs_defragment_range()
2602 * physical block addresses are continuous even if there are hole(s) in f2fs_defragment_range()
2608 if (err) in f2fs_defragment_range()
2611 if (!(map.m_flags & F2FS_MAP_FLAGS)) { in f2fs_defragment_range()
2616 if (blk_end && blk_end != map.m_pblk) in f2fs_defragment_range()
2627 if (!fragmented) { in f2fs_defragment_range()
2639 if (has_not_enough_free_secs(sbi, 0, sec_num)) { in f2fs_defragment_range()
2655 if (err) in f2fs_defragment_range()
2658 if (!(map.m_flags & F2FS_MAP_FLAGS)) { in f2fs_defragment_range()
2670 if (IS_ERR(page)) { in f2fs_defragment_range()
2685 if (map.m_lblk < pg_end && cnt < blk_per_seg) in f2fs_defragment_range()
2691 if (err) in f2fs_defragment_range()
2698 if (!err) in f2fs_defragment_range()
2710 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_defragment()
2713 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode)) in f2fs_ioc_defragment()
2716 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_defragment()
2719 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg, in f2fs_ioc_defragment()
2724 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) in f2fs_ioc_defragment()
2727 if (unlikely((range.start + range.len) >> PAGE_SHIFT > in f2fs_ioc_defragment()
2732 if (err) in f2fs_ioc_defragment()
2739 if (err < 0) in f2fs_ioc_defragment()
2742 if (copy_to_user((struct f2fs_defragment __user *)arg, &range, in f2fs_ioc_defragment()
2759 if (file_in->f_path.mnt != file_out->f_path.mnt || in f2fs_move_file_range()
2763 if (unlikely(f2fs_readonly(src->i_sb))) in f2fs_move_file_range()
2766 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) in f2fs_move_file_range()
2769 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) in f2fs_move_file_range()
2772 if (pos_out < 0 || pos_in < 0) in f2fs_move_file_range()
2775 if (src == dst) { in f2fs_move_file_range()
2776 if (pos_in == pos_out) in f2fs_move_file_range()
2778 if (pos_out > pos_in && pos_out < pos_in + len) in f2fs_move_file_range()
2783 if (src != dst) { in f2fs_move_file_range()
2785 if (!inode_trylock(dst)) in f2fs_move_file_range()
2790 if (pos_in + len > src->i_size || pos_in + len < pos_in) in f2fs_move_file_range()
2792 if (len == 0) in f2fs_move_file_range()
2794 if (pos_in + len == src->i_size) in f2fs_move_file_range()
2796 if (len == 0) { in f2fs_move_file_range()
2802 if (pos_out + olen > dst->i_size) in f2fs_move_file_range()
2806 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || in f2fs_move_file_range()
2812 if (ret) in f2fs_move_file_range()
2816 if (ret) in f2fs_move_file_range()
2822 if (ret) in f2fs_move_file_range()
2827 if (ret) in f2fs_move_file_range()
2833 if (src != dst) { in f2fs_move_file_range()
2835 if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) in f2fs_move_file_range()
2844 if (!ret) { in f2fs_move_file_range()
2845 if (dst_max_i_size) in f2fs_move_file_range()
2847 else if (dst_osize != dst->i_size) in f2fs_move_file_range()
2852 if (src != dst) in f2fs_move_file_range()
2857 if (src != dst) in f2fs_move_file_range()
2870 if (!(filp->f_mode & FMODE_READ) || in f2fs_ioc_move_range()
2874 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg, in f2fs_ioc_move_range()
2879 if (!dst.file) in f2fs_ioc_move_range()
2882 if (!(dst.file->f_mode & FMODE_WRITE)) { in f2fs_ioc_move_range()
2888 if (err) in f2fs_ioc_move_range()
2895 if (err) in f2fs_ioc_move_range()
2898 if (copy_to_user((struct f2fs_move_range __user *)arg, in f2fs_ioc_move_range()
2916 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_flush_device()
2919 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_flush_device()
2922 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_ioc_flush_device()
2925 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg, in f2fs_ioc_flush_device()
2929 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || in f2fs_ioc_flush_device()
2937 if (ret) in f2fs_ioc_flush_device()
2940 if (range.dev_num != 0) in f2fs_ioc_flush_device()
2945 if (start_segno < dev_start_segno || start_segno >= dev_end_segno) in f2fs_ioc_flush_device()
2950 if (!down_write_trylock(&sbi->gc_lock)) { in f2fs_ioc_flush_device()
2958 if (ret == -EAGAIN) in f2fs_ioc_flush_device()
2960 else if (ret < 0) in f2fs_ioc_flush_device()
2989 if (!IS_ERR(transfer_to[PRJQUOTA])) { in f2fs_transfer_project_quota()
2991 if (err) in f2fs_transfer_project_quota()
3007 if (!f2fs_sb_has_project_quota(sbi)) { in f2fs_ioc_setproject()
3008 if (projid != F2FS_DEF_PROJID) in f2fs_ioc_setproject()
3014 if (!f2fs_has_extra_attr(inode)) in f2fs_ioc_setproject()
3019 if (projid_eq(kprojid, F2FS_I(inode)->i_projid)) in f2fs_ioc_setproject()
3024 if (IS_NOQUOTA(inode)) in f2fs_ioc_setproject()
3028 if (IS_ERR(ipage)) in f2fs_ioc_setproject()
3031 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize, in f2fs_ioc_setproject()
3040 if (err) in f2fs_ioc_setproject()
3045 if (err) in f2fs_ioc_setproject()
3063 if (projid != F2FS_DEF_PROJID) in f2fs_ioc_setproject()
3104 if (iflags & f2fs_xflags_map[i].iflag) in f2fs_iflags_to_xflags()
3117 if (xflags & f2fs_xflags_map[i].xflag) in f2fs_xflags_to_iflags()
3129 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) in f2fs_fill_fsxattr()
3140 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) in f2fs_ioc_fsgetxattr()
3152 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) in f2fs_ioc_fssetxattr()
3156 if (!inode_owner_or_capable(inode)) in f2fs_ioc_fssetxattr()
3159 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) in f2fs_ioc_fssetxattr()
3163 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) in f2fs_ioc_fssetxattr()
3167 if (err) in f2fs_ioc_fssetxattr()
3174 if (err) in f2fs_ioc_fssetxattr()
3179 if (err) in f2fs_ioc_fssetxattr()
3195 if (inc) in f2fs_pin_file_control()
3199 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { in f2fs_pin_file_control()
3215 if (get_user(pin, (__u32 __user *)arg)) in f2fs_ioc_set_pin_file()
3218 if (!S_ISREG(inode->i_mode)) in f2fs_ioc_set_pin_file()
3221 if (f2fs_readonly(F2FS_I_SB(inode)->sb)) in f2fs_ioc_set_pin_file()
3225 if (ret) in f2fs_ioc_set_pin_file()
3230 if (f2fs_should_update_outplace(inode, NULL)) { in f2fs_ioc_set_pin_file()
3235 if (!pin) { in f2fs_ioc_set_pin_file()
3241 if (f2fs_pin_file_control(inode, false)) { in f2fs_ioc_set_pin_file()
3247 if (ret) in f2fs_ioc_set_pin_file()
3250 if (!f2fs_disable_compressed_file(inode)) { in f2fs_ioc_set_pin_file()
3270 if (is_inode_flag_set(inode, FI_PIN_FILE)) in f2fs_ioc_get_pin_file()
3283 if (is_inode_flag_set(inode, FI_NO_EXTENT)) in f2fs_precache_extents()
3299 if (err) in f2fs_precache_extents()
3318 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_resize_fs()
3321 if (f2fs_readonly(sbi->sb)) in f2fs_ioc_resize_fs()
3324 if (copy_from_user(&block_count, (void __user *)arg, in f2fs_ioc_resize_fs()
3337 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { in f2fs_ioc_enable_verity()
3349 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) in f2fs_ioc_measure_verity()
3364 if (!vbuf) in f2fs_ioc_getfslabel()
3373 if (copy_to_user((char __user *)arg, vbuf, in f2fs_ioc_getfslabel()
3388 if (!capable(CAP_SYS_ADMIN)) in f2fs_ioc_setfslabel()
3392 if (IS_ERR(vbuf)) in f2fs_ioc_setfslabel()
3396 if (err) in f2fs_ioc_setfslabel()
3422 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_get_compress_blocks()
3425 if (!f2fs_compressed_file(inode)) in f2fs_get_compress_blocks()
3444 if (!__is_valid_data_blkaddr(blkaddr)) in release_compress_blocks()
3446 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, in release_compress_blocks()
3457 if (i == 0) { in release_compress_blocks()
3458 if (blkaddr == COMPRESS_ADDR) in release_compress_blocks()
3464 if (__is_valid_data_blkaddr(blkaddr)) in release_compress_blocks()
3467 if (blkaddr != NEW_ADDR) in release_compress_blocks()
3495 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_release_compress_blocks()
3498 if (!f2fs_compressed_file(inode)) in f2fs_release_compress_blocks()
3501 if (f2fs_readonly(sbi->sb)) in f2fs_release_compress_blocks()
3505 if (ret) in f2fs_release_compress_blocks()
3513 if ((filp->f_mode & FMODE_WRITE && writecount != 1) || in f2fs_release_compress_blocks()
3519 if (IS_IMMUTABLE(inode)) { in f2fs_release_compress_blocks()
3525 if (ret) in f2fs_release_compress_blocks()
3533 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) in f2fs_release_compress_blocks()
3547 if (ret) { in f2fs_release_compress_blocks()
3548 if (ret == -ENOENT) { in f2fs_release_compress_blocks()
3565 if (ret < 0) in f2fs_release_compress_blocks()
3579 if (ret >= 0) { in f2fs_release_compress_blocks()
3581 } else if (released_blocks && in f2fs_release_compress_blocks()
3607 if (!__is_valid_data_blkaddr(blkaddr)) in reserve_compress_blocks()
3609 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, in reserve_compress_blocks()
3622 if (i == 0) { in reserve_compress_blocks()
3623 if (blkaddr == COMPRESS_ADDR) in reserve_compress_blocks()
3629 if (__is_valid_data_blkaddr(blkaddr)) { in reserve_compress_blocks()
3640 if (ret) in reserve_compress_blocks()
3643 if (reserved != cluster_size - compr_blocks) in reserve_compress_blocks()
3664 if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) in f2fs_reserve_compress_blocks()
3667 if (!f2fs_compressed_file(inode)) in f2fs_reserve_compress_blocks()
3670 if (f2fs_readonly(sbi->sb)) in f2fs_reserve_compress_blocks()
3674 if (ret) in f2fs_reserve_compress_blocks()
3677 if (atomic_read(&F2FS_I(inode)->i_compr_blocks)) in f2fs_reserve_compress_blocks()
3684 if (!IS_IMMUTABLE(inode)) { in f2fs_reserve_compress_blocks()
3700 if (ret) { in f2fs_reserve_compress_blocks()
3701 if (ret == -ENOENT) { in f2fs_reserve_compress_blocks()
3718 if (ret < 0) in f2fs_reserve_compress_blocks()
3728 if (ret >= 0) { in f2fs_reserve_compress_blocks()
3739 if (ret >= 0) { in f2fs_reserve_compress_blocks()
3741 } else if (reserved_blocks && in f2fs_reserve_compress_blocks()
3763 if (!q) in f2fs_secure_erase()
3766 if (flags & F2FS_TRIM_FILE_DISCARD) in f2fs_secure_erase()
3771 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { in f2fs_secure_erase()
3772 if (IS_ENCRYPTED(inode)) in f2fs_secure_erase()
3795 if (!(filp->f_mode & FMODE_WRITE)) in f2fs_sec_trim_file()
3798 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, in f2fs_sec_trim_file()
3802 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || in f2fs_sec_trim_file()
3806 if (((range.flags & F2FS_TRIM_FILE_DISCARD) && in f2fs_sec_trim_file()
3815 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || in f2fs_sec_trim_file()
3821 if (range.len == 0) in f2fs_sec_trim_file()
3824 if (inode->i_size - range.start > range.len) { in f2fs_sec_trim_file()
3832 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || in f2fs_sec_trim_file()
3842 if (ret) in f2fs_sec_trim_file()
3850 if (ret) in f2fs_sec_trim_file()
3863 if (ret) { in f2fs_sec_trim_file()
3864 if (ret == -ENOENT) { in f2fs_sec_trim_file()
3877 if (!__is_valid_data_blkaddr(blkaddr)) in f2fs_sec_trim_file()
3880 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, in f2fs_sec_trim_file()
3888 if (f2fs_is_multi_device(sbi)) { in f2fs_sec_trim_file()
3894 if (len) { in f2fs_sec_trim_file()
3895 if (prev_bdev == cur_bdev && in f2fs_sec_trim_file()
3903 if (ret) { in f2fs_sec_trim_file()
3912 if (!len) { in f2fs_sec_trim_file()
3922 if (fatal_signal_pending(current)) { in f2fs_sec_trim_file()
3929 if (len) in f2fs_sec_trim_file()
3944 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) in f2fs_ioctl()
3946 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) in f2fs_ioctl()
4041 if (!f2fs_is_compress_backend_ready(inode)) in f2fs_file_read_iter()
4046 if (ret > 0) in f2fs_file_read_iter()
4058 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { in f2fs_file_write_iter()
4063 if (!f2fs_is_compress_backend_ready(inode)) { in f2fs_file_write_iter()
4068 if (iocb->ki_flags & IOCB_NOWAIT) { in f2fs_file_write_iter()
4069 if (!inode_trylock(inode)) { in f2fs_file_write_iter()
4078 if (ret > 0) { in f2fs_file_write_iter()
4083 if (iov_iter_fault_in_readable(from, iov_iter_count(from))) in f2fs_file_write_iter()
4086 if ((iocb->ki_flags & IOCB_NOWAIT)) { in f2fs_file_write_iter()
4087 if (!f2fs_overwrite_io(inode, iocb->ki_pos, in f2fs_file_write_iter()
4099 if (is_inode_flag_set(inode, FI_NO_PREALLOC)) in f2fs_file_write_iter()
4102 if (iocb->ki_flags & IOCB_DIRECT) { in f2fs_file_write_iter()
4108 if (err) in f2fs_file_write_iter()
4111 * If force_buffere_io() is true, we have to allocate in f2fs_file_write_iter()
4115 if (!f2fs_force_buffered_io(inode, iocb, from) && in f2fs_file_write_iter()
4123 if (err) { in f2fs_file_write_iter()
4134 /* if we couldn't write data, we should deallocate blocks. */ in f2fs_file_write_iter()
4135 if (preallocated && i_size_read(inode) < target_size) in f2fs_file_write_iter()
4138 if (ret > 0) in f2fs_file_write_iter()
4145 if (ret > 0) in f2fs_file_write_iter()