Lines Matching +full:foo +full:- +full:queue

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/backing-dev.h>
37 #include "disk-io.h"
40 #include "print-tree.h"
41 #include "ordered-data.h"
43 #include "tree-log.h"
47 #include "free-space-cache.h"
48 #include "inode-map.h"
51 #include "delalloc-space.h"
52 #include "block-group.h"
53 #include "space-info.h"
113 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; in btrfs_cleanup_ordered_extents()
115 u64 page_end = page_start + PAGE_SIZE - 1; in btrfs_cleanup_ordered_extents()
120 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_cleanup_ordered_extents()
133 if (page_start >= offset && page_end <= (offset + bytes - 1)) { in btrfs_cleanup_ordered_extents()
135 bytes -= PAGE_SIZE; in btrfs_cleanup_ordered_extents()
193 path->leave_spinning = 1; in insert_inline_extent()
199 leaf = path->nodes[0]; in insert_inline_extent()
200 ei = btrfs_item_ptr(leaf, path->slots[0], in insert_inline_extent()
202 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in insert_inline_extent()
223 compressed_size -= cur_size; in insert_inline_extent()
228 page = find_get_page(inode->i_mapping, in insert_inline_extent()
244 size = ALIGN(size, root->fs_info->sectorsize); in insert_inline_extent()
258 BTRFS_I(inode)->disk_i_size = inode->i_size; in insert_inline_extent()
276 struct btrfs_root *root = inode->root; in cow_file_range_inline()
277 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range_inline()
279 u64 isize = i_size_read(&inode->vfs_inode); in cow_file_range_inline()
281 u64 inline_len = actual_end - start; in cow_file_range_inline()
282 u64 aligned_end = ALIGN(end, fs_info->sectorsize); in cow_file_range_inline()
293 actual_end > fs_info->sectorsize || in cow_file_range_inline()
296 (actual_end & (fs_info->sectorsize - 1)) == 0) || in cow_file_range_inline()
298 data_len > fs_info->max_inline) { in cow_file_range_inline()
304 return -ENOMEM; in cow_file_range_inline()
311 trans->block_rsv = &inode->block_rsv; in cow_file_range_inline()
331 root, &inode->vfs_inode, start, in cow_file_range_inline()
334 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
337 } else if (ret == -ENOSPC) { in cow_file_range_inline()
342 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); in cow_file_range_inline()
343 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); in cow_file_range_inline()
395 BUG_ON(!async_extent); /* -ENOMEM */ in add_async_extent()
396 async_extent->start = start; in add_async_extent()
397 async_extent->ram_size = ram_size; in add_async_extent()
398 async_extent->compressed_size = compressed_size; in add_async_extent()
399 async_extent->pages = pages; in add_async_extent()
400 async_extent->nr_pages = nr_pages; in add_async_extent()
401 async_extent->compress_type = compress_type; in add_async_extent()
402 list_add_tail(&async_extent->list, &cow->extents); in add_async_extent()
411 if (inode->flags & BTRFS_INODE_NODATACOW || in inode_can_compress()
412 inode->flags & BTRFS_INODE_NODATASUM) in inode_can_compress()
424 struct btrfs_fs_info *fs_info = inode->root->fs_info; in inode_need_compress()
436 if (inode->defrag_compress) in inode_need_compress()
439 if (inode->flags & BTRFS_INODE_NOCOMPRESS) in inode_need_compress()
442 inode->flags & BTRFS_INODE_COMPRESS || in inode_need_compress()
443 inode->prop_compress) in inode_need_compress()
444 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); in inode_need_compress()
453 (start > 0 || end + 1 < inode->disk_i_size)) in inode_should_defrag()
462 * This is done inside an ordered work queue, and the compression
464 * two, and the ordered work queue takes care of making sure that
465 * happens in the same order things were put onto the queue by
469 * entry onto the work queue to write the uncompressed bytes. This
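For context, a minimal sketch of how each chunk ends up on that ordered work queue, pieced together from the btrfs_init_work()/btrfs_queue_work() calls matched further down in this listing (the callback wiring shown here is an assumption, not a verbatim excerpt of cow_file_range_async()):

	/* one btrfs_work per chunk; the ordered callbacks (second and third)
	 * run in the order the work items were queued */
	btrfs_init_work(&async_chunk[i].work, async_cow_start,
			async_cow_submit, async_cow_free);
	atomic_add(nr_pages, &fs_info->async_delalloc_pages);
	btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);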
476 struct inode *inode = async_chunk->inode; in compress_file_range()
477 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in compress_file_range()
478 u64 blocksize = fs_info->sectorsize; in compress_file_range()
479 u64 start = async_chunk->start; in compress_file_range()
480 u64 end = async_chunk->end; in compress_file_range()
490 int compress_type = fs_info->compress_type; in compress_file_range()
494 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, in compress_file_range()
512 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; in compress_file_range()
530 total_compressed = actual_end - start; in compress_file_range()
537 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) in compress_file_range()
546 * we do compression for mount -o compress and when the in compress_file_range()
559 if (BTRFS_I(inode)->defrag_compress) in compress_file_range()
560 compress_type = BTRFS_I(inode)->defrag_compress; in compress_file_range()
561 else if (BTRFS_I(inode)->prop_compress) in compress_file_range()
562 compress_type = BTRFS_I(inode)->prop_compress; in compress_file_range()
583 compress_type | (fs_info->compress_level << 4), in compress_file_range()
584 inode->i_mapping, start, in compress_file_range()
592 struct page *page = pages[nr_pages - 1]; in compress_file_range()
601 PAGE_SIZE - offset); in compress_file_range()
657 WARN_ON(pages[i]->mapping); in compress_file_range()
707 WARN_ON(pages[i]->mapping); in compress_file_range()
717 !(BTRFS_I(inode)->prop_compress)) { in compress_file_range()
718 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; in compress_file_range()
725 * to our extent and set things up for the async work queue to run in compress_file_range()
728 if (async_chunk->locked_page && in compress_file_range()
729 (page_offset(async_chunk->locked_page) >= start && in compress_file_range()
730 page_offset(async_chunk->locked_page) <= end)) { in compress_file_range()
731 __set_page_dirty_nobuffers(async_chunk->locked_page); in compress_file_range()
737 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
748 if (!async_extent->pages) in free_async_extent_pages()
751 for (i = 0; i < async_extent->nr_pages; i++) { in free_async_extent_pages()
752 WARN_ON(async_extent->pages[i]->mapping); in free_async_extent_pages()
753 put_page(async_extent->pages[i]); in free_async_extent_pages()
755 kfree(async_extent->pages); in free_async_extent_pages()
756 async_extent->nr_pages = 0; in free_async_extent_pages()
757 async_extent->pages = NULL; in free_async_extent_pages()
768 struct btrfs_inode *inode = BTRFS_I(async_chunk->inode); in submit_compressed_extents()
769 struct btrfs_fs_info *fs_info = inode->root->fs_info; in submit_compressed_extents()
774 struct btrfs_root *root = inode->root; in submit_compressed_extents()
775 struct extent_io_tree *io_tree = &inode->io_tree; in submit_compressed_extents()
779 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
780 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
782 list_del(&async_extent->list); in submit_compressed_extents()
785 lock_extent(io_tree, async_extent->start, in submit_compressed_extents()
786 async_extent->start + async_extent->ram_size - 1); in submit_compressed_extents()
788 if (!async_extent->pages) { in submit_compressed_extents()
793 ret = cow_file_range(inode, async_chunk->locked_page, in submit_compressed_extents()
794 async_extent->start, in submit_compressed_extents()
795 async_extent->start + in submit_compressed_extents()
796 async_extent->ram_size - 1, in submit_compressed_extents()
808 extent_write_locked_range(&inode->vfs_inode, in submit_compressed_extents()
809 async_extent->start, in submit_compressed_extents()
810 async_extent->start + in submit_compressed_extents()
811 async_extent->ram_size - 1, in submit_compressed_extents()
813 else if (ret && async_chunk->locked_page) in submit_compressed_extents()
814 unlock_page(async_chunk->locked_page); in submit_compressed_extents()
820 ret = btrfs_reserve_extent(root, async_extent->ram_size, in submit_compressed_extents()
821 async_extent->compressed_size, in submit_compressed_extents()
822 async_extent->compressed_size, in submit_compressed_extents()
827 if (ret == -ENOSPC) { in submit_compressed_extents()
828 unlock_extent(io_tree, async_extent->start, in submit_compressed_extents()
829 async_extent->start + in submit_compressed_extents()
830 async_extent->ram_size - 1); in submit_compressed_extents()
838 extent_range_redirty_for_io(&inode->vfs_inode, in submit_compressed_extents()
839 async_extent->start, in submit_compressed_extents()
840 async_extent->start + in submit_compressed_extents()
841 async_extent->ram_size - 1); in submit_compressed_extents()
851 em = create_io_em(inode, async_extent->start, in submit_compressed_extents()
852 async_extent->ram_size, /* len */ in submit_compressed_extents()
853 async_extent->start, /* orig_start */ in submit_compressed_extents()
857 async_extent->ram_size, /* ram_bytes */ in submit_compressed_extents()
858 async_extent->compress_type, in submit_compressed_extents()
866 async_extent->start, in submit_compressed_extents()
868 async_extent->ram_size, in submit_compressed_extents()
871 async_extent->compress_type); in submit_compressed_extents()
873 btrfs_drop_extent_cache(inode, async_extent->start, in submit_compressed_extents()
874 async_extent->start + in submit_compressed_extents()
875 async_extent->ram_size - 1, 0); in submit_compressed_extents()
883 extent_clear_unlock_delalloc(inode, async_extent->start, in submit_compressed_extents()
884 async_extent->start + in submit_compressed_extents()
885 async_extent->ram_size - 1, in submit_compressed_extents()
889 if (btrfs_submit_compressed_write(inode, async_extent->start, in submit_compressed_extents()
890 async_extent->ram_size, in submit_compressed_extents()
892 ins.offset, async_extent->pages, in submit_compressed_extents()
893 async_extent->nr_pages, in submit_compressed_extents()
894 async_chunk->write_flags, in submit_compressed_extents()
895 async_chunk->blkcg_css)) { in submit_compressed_extents()
896 struct page *p = async_extent->pages[0]; in submit_compressed_extents()
897 const u64 start = async_extent->start; in submit_compressed_extents()
898 const u64 end = start + async_extent->ram_size - 1; in submit_compressed_extents()
900 p->mapping = inode->vfs_inode.i_mapping; in submit_compressed_extents()
903 p->mapping = NULL; in submit_compressed_extents()
918 extent_clear_unlock_delalloc(inode, async_extent->start, in submit_compressed_extents()
919 async_extent->start + in submit_compressed_extents()
920 async_extent->ram_size - 1, in submit_compressed_extents()
935 struct extent_map_tree *em_tree = &inode->extent_tree; in get_extent_allocation_hint()
939 read_lock(&em_tree->lock); in get_extent_allocation_hint()
947 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in get_extent_allocation_hint()
950 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) in get_extent_allocation_hint()
951 alloc_hint = em->block_start; in get_extent_allocation_hint()
955 alloc_hint = em->block_start; in get_extent_allocation_hint()
959 read_unlock(&em_tree->lock); in get_extent_allocation_hint()
982 struct btrfs_root *root = inode->root; in cow_file_range()
983 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range()
989 u64 blocksize = fs_info->sectorsize; in cow_file_range()
999 ret = -EINVAL; in cow_file_range()
1003 num_bytes = ALIGN(end - start + 1, blocksize); in cow_file_range()
1005 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); in cow_file_range()
1027 (end - start + PAGE_SIZE) / PAGE_SIZE; in cow_file_range()
1036 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); in cow_file_range()
1049 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in cow_file_range()
1052 min_alloc_size = fs_info->sectorsize; in cow_file_range()
1084 if (root->root_key.objectid == in cow_file_range()
1101 start + ram_size - 1, 0); in cow_file_range()
1116 extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, in cow_file_range()
1123 num_bytes -= cur_alloc_size; in cow_file_range()
1140 btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); in cow_file_range()
1161 start + cur_alloc_size - 1, in cow_file_range()
1176 * work queue call back to start compression on a file and pages
1187 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_start()
1188 async_chunk->inode = NULL; in async_cow_start()
1193 * work queue call back to submit previously compressed pages
1202 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in async_cow_submit()
1206 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < in async_cow_submit()
1208 cond_wake_up_nomb(&fs_info->async_submit_wait); in async_cow_submit()
1211 * ->inode could be NULL if async_cow_start has failed to compress, in async_cow_submit()
1213 * always adjust ->async_delalloc_pages as it's paired with the init in async_cow_submit()
1216 if (async_chunk->inode) in async_cow_submit()
1225 if (async_chunk->inode) in async_cow_free()
1226 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1227 if (async_chunk->blkcg_css) in async_cow_free()
1228 css_put(async_chunk->blkcg_css); in async_cow_free()
1233 if (atomic_dec_and_test(async_chunk->pending)) in async_cow_free()
1234 kvfree(async_chunk->pending); in async_cow_free()
1243 struct btrfs_fs_info *fs_info = inode->root->fs_info; in cow_file_range_async()
1249 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); in cow_file_range_async()
1255 unlock_extent(&inode->io_tree, start, end); in cow_file_range_async()
1257 if (inode->flags & BTRFS_INODE_NOCOMPRESS && in cow_file_range_async()
1279 return -ENOMEM; in cow_file_range_async()
1282 async_chunk = ctx->chunks; in cow_file_range_async()
1283 atomic_set(&ctx->num_chunks, num_chunks); in cow_file_range_async()
1287 cur_end = min(end, start + SZ_512K - 1); in cow_file_range_async()
1295 ihold(&inode->vfs_inode); in cow_file_range_async()
1296 async_chunk[i].pending = &ctx->num_chunks; in cow_file_range_async()
1297 async_chunk[i].inode = &inode->vfs_inode; in cow_file_range_async()
1323 cur_end - start); in cow_file_range_async()
1340 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); in cow_file_range_async()
1341 atomic_add(nr_pages, &fs_info->async_delalloc_pages); in cow_file_range_async()
1343 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in cow_file_range_async()
1359 ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr, in csum_exist_in_range()
1360 bytenr + num_bytes - 1, &list, 0); in csum_exist_in_range()
1366 list_del(&sums->list); in csum_exist_in_range()
1379 const bool is_reloc_ino = (inode->root->root_key.objectid == in fallback_to_cow()
1381 const u64 range_bytes = end + 1 - start; in fallback_to_cow()
1382 struct extent_io_tree *io_tree = &inode->io_tree; in fallback_to_cow()
1422 struct btrfs_fs_info *fs_info = inode->root->fs_info; in fallback_to_cow()
1423 struct btrfs_space_info *sinfo = fs_info->data_sinfo; in fallback_to_cow()
1428 spin_lock(&sinfo->lock); in fallback_to_cow()
1430 spin_unlock(&sinfo->lock); in fallback_to_cow()
1454 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_nocow()
1455 struct btrfs_root *root = inode->root; in run_delalloc_nocow()
1457 u64 cow_start = (u64)-1; in run_delalloc_nocow()
1475 return -ENOMEM; in run_delalloc_nocow()
1501 if (ret > 0 && path->slots[0] > 0 && check_prev) { in run_delalloc_nocow()
1502 leaf = path->nodes[0]; in run_delalloc_nocow()
1504 path->slots[0] - 1); in run_delalloc_nocow()
1507 path->slots[0]--; in run_delalloc_nocow()
1512 leaf = path->nodes[0]; in run_delalloc_nocow()
1513 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in run_delalloc_nocow()
1516 if (cow_start != (u64)-1) in run_delalloc_nocow()
1522 leaf = path->nodes[0]; in run_delalloc_nocow()
1525 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in run_delalloc_nocow()
1536 path->slots[0]++; in run_delalloc_nocow()
1559 fi = btrfs_item_ptr(leaf, path->slots[0], in run_delalloc_nocow()
1577 path->slots[0]++; in run_delalloc_nocow()
1597 btrfs_root_last_snapshot(&root->root_item)) in run_delalloc_nocow()
1605 found_key.offset - in run_delalloc_nocow()
1609 * ret could be -EIO if the above fails to read in run_delalloc_nocow()
1613 if (cow_start != (u64)-1) in run_delalloc_nocow()
1622 disk_bytenr += cur_offset - found_key.offset; in run_delalloc_nocow()
1623 num_bytes = min(end + 1, extent_end) - cur_offset; in run_delalloc_nocow()
1628 if (!freespace_inode && atomic_read(&root->snapshot_force_cow)) in run_delalloc_nocow()
1639 * ret could be -EIO if the above fails to read in run_delalloc_nocow()
1643 if (cow_start != (u64)-1) in run_delalloc_nocow()
1655 extent_end = ALIGN(extent_end, fs_info->sectorsize); in run_delalloc_nocow()
1658 path->slots[0]++; in run_delalloc_nocow()
1671 if (cow_start == (u64)-1) in run_delalloc_nocow()
1676 path->slots[0]++; in run_delalloc_nocow()
1683 * COW range from cow_start to found_key.offset - 1. As the key in run_delalloc_nocow()
1687 if (cow_start != (u64)-1) { in run_delalloc_nocow()
1689 cow_start, found_key.offset - 1, in run_delalloc_nocow()
1693 cow_start = (u64)-1; in run_delalloc_nocow()
1697 u64 orig_start = found_key.offset - extent_offset; in run_delalloc_nocow()
1718 cur_offset + num_bytes - 1, in run_delalloc_nocow()
1735 if (root->root_key.objectid == in run_delalloc_nocow()
1746 cur_offset + num_bytes - 1, in run_delalloc_nocow()
1766 if (cur_offset <= end && cow_start == (u64)-1) in run_delalloc_nocow()
1769 if (cow_start != (u64)-1) { in run_delalloc_nocow()
1796 if (!(inode->flags & BTRFS_INODE_NODATACOW) && in need_force_cow()
1797 !(inode->flags & BTRFS_INODE_PREALLOC)) in need_force_cow()
1805 if (inode->defrag_bytes && in need_force_cow()
1806 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 0, NULL)) in need_force_cow()
1823 if (inode->flags & BTRFS_INODE_NODATACOW && !force_cow) { in btrfs_run_delalloc_range()
1826 } else if (inode->flags & BTRFS_INODE_PREALLOC && !force_cow) { in btrfs_run_delalloc_range()
1834 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); in btrfs_run_delalloc_range()
1840 end - start + 1); in btrfs_run_delalloc_range()
1850 if (!(orig->state & EXTENT_DELALLOC)) in btrfs_split_delalloc_extent()
1853 size = orig->end - orig->start + 1; in btrfs_split_delalloc_extent()
1862 new_size = orig->end - split + 1; in btrfs_split_delalloc_extent()
1864 new_size = split - orig->start; in btrfs_split_delalloc_extent()
1870 spin_lock(&BTRFS_I(inode)->lock); in btrfs_split_delalloc_extent()
1872 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_split_delalloc_extent()
1887 if (!(other->state & EXTENT_DELALLOC)) in btrfs_merge_delalloc_extent()
1890 if (new->start > other->start) in btrfs_merge_delalloc_extent()
1891 new_size = new->end - other->start + 1; in btrfs_merge_delalloc_extent()
1893 new_size = other->end - new->start + 1; in btrfs_merge_delalloc_extent()
1897 spin_lock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1898 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); in btrfs_merge_delalloc_extent()
1899 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1921 old_size = other->end - other->start + 1; in btrfs_merge_delalloc_extent()
1923 old_size = new->end - new->start + 1; in btrfs_merge_delalloc_extent()
1928 spin_lock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1929 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); in btrfs_merge_delalloc_extent()
1930 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1936 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_add_delalloc_inodes()
1938 spin_lock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
1939 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { in btrfs_add_delalloc_inodes()
1940 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, in btrfs_add_delalloc_inodes()
1941 &root->delalloc_inodes); in btrfs_add_delalloc_inodes()
1943 &BTRFS_I(inode)->runtime_flags); in btrfs_add_delalloc_inodes()
1944 root->nr_delalloc_inodes++; in btrfs_add_delalloc_inodes()
1945 if (root->nr_delalloc_inodes == 1) { in btrfs_add_delalloc_inodes()
1946 spin_lock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
1947 BUG_ON(!list_empty(&root->delalloc_root)); in btrfs_add_delalloc_inodes()
1948 list_add_tail(&root->delalloc_root, in btrfs_add_delalloc_inodes()
1949 &fs_info->delalloc_roots); in btrfs_add_delalloc_inodes()
1950 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
1953 spin_unlock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
1960 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_del_delalloc_inode()
1962 if (!list_empty(&inode->delalloc_inodes)) { in __btrfs_del_delalloc_inode()
1963 list_del_init(&inode->delalloc_inodes); in __btrfs_del_delalloc_inode()
1965 &inode->runtime_flags); in __btrfs_del_delalloc_inode()
1966 root->nr_delalloc_inodes--; in __btrfs_del_delalloc_inode()
1967 if (!root->nr_delalloc_inodes) { in __btrfs_del_delalloc_inode()
1968 ASSERT(list_empty(&root->delalloc_inodes)); in __btrfs_del_delalloc_inode()
1969 spin_lock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
1970 BUG_ON(list_empty(&root->delalloc_root)); in __btrfs_del_delalloc_inode()
1971 list_del_init(&root->delalloc_root); in __btrfs_del_delalloc_inode()
1972 spin_unlock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
1980 spin_lock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
1982 spin_unlock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
1992 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_set_delalloc_extent()
2001 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { in btrfs_set_delalloc_extent()
2002 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_set_delalloc_extent()
2003 u64 len = state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2007 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2009 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2015 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, in btrfs_set_delalloc_extent()
2016 fs_info->delalloc_batch); in btrfs_set_delalloc_extent()
2017 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2018 BTRFS_I(inode)->delalloc_bytes += len; in btrfs_set_delalloc_extent()
2020 BTRFS_I(inode)->defrag_bytes += len; in btrfs_set_delalloc_extent()
2022 &BTRFS_I(inode)->runtime_flags)) in btrfs_set_delalloc_extent()
2024 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2027 if (!(state->state & EXTENT_DELALLOC_NEW) && in btrfs_set_delalloc_extent()
2029 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2030 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - in btrfs_set_delalloc_extent()
2031 state->start; in btrfs_set_delalloc_extent()
2032 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2044 struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb); in btrfs_clear_delalloc_extent()
2045 u64 len = state->end + 1 - state->start; in btrfs_clear_delalloc_extent()
2048 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { in btrfs_clear_delalloc_extent()
2049 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2050 inode->defrag_bytes -= len; in btrfs_clear_delalloc_extent()
2051 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2059 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { in btrfs_clear_delalloc_extent()
2060 struct btrfs_root *root = inode->root; in btrfs_clear_delalloc_extent()
2063 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2064 btrfs_mod_outstanding_extents(inode, -num_extents); in btrfs_clear_delalloc_extent()
2065 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2073 root != fs_info->tree_root) in btrfs_clear_delalloc_extent()
2080 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && in btrfs_clear_delalloc_extent()
2081 do_list && !(state->state & EXTENT_NORESERVE) && in btrfs_clear_delalloc_extent()
2085 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, in btrfs_clear_delalloc_extent()
2086 fs_info->delalloc_batch); in btrfs_clear_delalloc_extent()
2087 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2088 inode->delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2089 if (do_list && inode->delalloc_bytes == 0 && in btrfs_clear_delalloc_extent()
2091 &inode->runtime_flags)) in btrfs_clear_delalloc_extent()
2093 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2096 if ((state->state & EXTENT_DELALLOC_NEW) && in btrfs_clear_delalloc_extent()
2098 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2099 ASSERT(inode->new_delalloc_bytes >= len); in btrfs_clear_delalloc_extent()
2100 inode->new_delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2101 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2106 * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
2110 * @page - The page we are about to add to the bio
2111 * @size - size we want to add to the bio
2112 * @bio - bio we want to ensure is smaller than a stripe
2113 * @bio_flags - flags of the bio
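A hedged usage sketch (the caller and the meaning of the return value are assumptions; only the helper's parameter block is matched here): a submitter can ask, before adding a page to an in-flight bio, whether the extra bytes would make the bio cross a stripe, and if so send the current bio off first:

	/* assumption: a non-zero return means @page must not be added */
	if (btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio, bio_flags)) {
		/* submit 'bio' as-is, then start a new bio for 'page' */
	}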
2122 struct inode *inode = page->mapping->host; in btrfs_bio_fits_in_stripe()
2123 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_bio_fits_in_stripe()
2124 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_bio_fits_in_stripe()
2133 length = bio->bi_iter.bi_size; in btrfs_bio_fits_in_stripe()
2171 * c-1) if bio is issued by fsync: sync submit
2174 * c-2) if root is reloc root: sync submit
2177 * c-3) otherwise: async submit
2183 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_data_bio()
2184 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_submit_data_bio()
2188 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); in btrfs_submit_data_bio()
2190 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; in btrfs_submit_data_bio()
2206 ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL); in btrfs_submit_data_bio()
2213 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in btrfs_submit_data_bio()
2230 bio->bi_status = ret; in btrfs_submit_data_bio()
2247 trans->adding_csums = true; in add_pending_csums()
2248 ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum); in add_pending_csums()
2249 trans->adding_csums = false; in add_pending_csums()
2262 const u64 end = start + len - 1; in btrfs_find_new_delalloc_bytes()
2265 const u64 search_len = end - search_start + 1; in btrfs_find_new_delalloc_bytes()
2274 if (em->block_start != EXTENT_MAP_HOLE) in btrfs_find_new_delalloc_bytes()
2277 em_len = em->len; in btrfs_find_new_delalloc_bytes()
2278 if (em->start < search_start) in btrfs_find_new_delalloc_bytes()
2279 em_len -= search_start - em->start; in btrfs_find_new_delalloc_bytes()
2283 ret = set_extent_bit(&inode->io_tree, search_start, in btrfs_find_new_delalloc_bytes()
2284 search_start + em_len - 1, in btrfs_find_new_delalloc_bytes()
2302 if (start >= i_size_read(&inode->vfs_inode) && in btrfs_set_extent_delalloc()
2303 !(inode->flags & BTRFS_INODE_PREALLOC)) { in btrfs_set_extent_delalloc()
2313 end + 1 - start, in btrfs_set_extent_delalloc()
2319 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, in btrfs_set_extent_delalloc()
2344 page = fixup->page; in btrfs_writepage_fixup_worker()
2345 inode = BTRFS_I(fixup->inode); in btrfs_writepage_fixup_worker()
2347 page_end = page_offset(page) + PAGE_SIZE - 1; in btrfs_writepage_fixup_worker()
2360 * page->mapping may go NULL, but it shouldn't be moved to a different in btrfs_writepage_fixup_worker()
2363 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { in btrfs_writepage_fixup_worker()
2398 lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2406 unlock_extent_cached(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2433 unlock_extent_cached(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2441 mapping_set_error(page->mapping, ret); in btrfs_writepage_fixup_worker()
2456 btrfs_add_delayed_iput(&inode->vfs_inode); in btrfs_writepage_fixup_worker()
2472 struct inode *inode = page->mapping->host; in btrfs_writepage_cow_fixup()
2473 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_writepage_cow_fixup()
2488 return -EAGAIN; in btrfs_writepage_cow_fixup()
2492 return -EAGAIN; in btrfs_writepage_cow_fixup()
2498 * page->mapping outside of the page lock. in btrfs_writepage_cow_fixup()
2503 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); in btrfs_writepage_cow_fixup()
2504 fixup->page = page; in btrfs_writepage_cow_fixup()
2505 fixup->inode = inode; in btrfs_writepage_cow_fixup()
2506 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); in btrfs_writepage_cow_fixup()
2508 return -EAGAIN; in btrfs_writepage_cow_fixup()
2516 struct btrfs_root *root = inode->root; in insert_reserved_file_extent()
2529 return -ENOMEM; in insert_reserved_file_extent()
2551 path->leave_spinning = 1; in insert_reserved_file_extent()
2557 leaf = path->nodes[0]; in insert_reserved_file_extent()
2558 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); in insert_reserved_file_extent()
2560 btrfs_item_ptr_offset(leaf, path->slots[0]), in insert_reserved_file_extent()
2566 inode_add_bytes(&inode->vfs_inode, num_bytes); in insert_reserved_file_extent()
2592 spin_lock(&cache->lock); in btrfs_release_delalloc_bytes()
2593 cache->delalloc_bytes -= len; in btrfs_release_delalloc_bytes()
2594 spin_unlock(&cache->lock); in btrfs_release_delalloc_bytes()
2607 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); in insert_ordered_extent_file_extent()
2609 oe->disk_num_bytes); in insert_ordered_extent_file_extent()
2610 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) in insert_ordered_extent_file_extent()
2611 logical_len = oe->truncated_len; in insert_ordered_extent_file_extent()
2613 logical_len = oe->num_bytes; in insert_ordered_extent_file_extent()
2616 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); in insert_ordered_extent_file_extent()
2619 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), in insert_ordered_extent_file_extent()
2620 oe->file_offset, &stack_fi, in insert_ordered_extent_file_extent()
2621 oe->qgroup_rsv); in insert_ordered_extent_file_extent()
2631 struct inode *inode = ordered_extent->inode; in btrfs_finish_ordered_io()
2632 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_finish_ordered_io()
2633 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_finish_ordered_io()
2635 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_finish_ordered_io()
2640 u64 logical_len = ordered_extent->num_bytes; in btrfs_finish_ordered_io()
2648 start = ordered_extent->file_offset; in btrfs_finish_ordered_io()
2649 end = start + ordered_extent->num_bytes - 1; in btrfs_finish_ordered_io()
2651 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2652 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2653 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) in btrfs_finish_ordered_io()
2658 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2659 ret = -EIO; in btrfs_finish_ordered_io()
2665 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2667 logical_len = ordered_extent->truncated_len; in btrfs_finish_ordered_io()
2673 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2674 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ in btrfs_finish_ordered_io()
2686 trans->block_rsv = &BTRFS_I(inode)->block_rsv; in btrfs_finish_ordered_io()
2688 if (ret) /* -ENOMEM or corruption */ in btrfs_finish_ordered_io()
2706 trans->block_rsv = &BTRFS_I(inode)->block_rsv; in btrfs_finish_ordered_io()
2708 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) in btrfs_finish_ordered_io()
2709 compress_type = ordered_extent->compress_type; in btrfs_finish_ordered_io()
2710 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2713 ordered_extent->file_offset, in btrfs_finish_ordered_io()
2714 ordered_extent->file_offset + in btrfs_finish_ordered_io()
2717 BUG_ON(root == fs_info->tree_root); in btrfs_finish_ordered_io()
2722 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2723 ordered_extent->disk_num_bytes); in btrfs_finish_ordered_io()
2726 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, in btrfs_finish_ordered_io()
2727 ordered_extent->file_offset, in btrfs_finish_ordered_io()
2728 ordered_extent->num_bytes, trans->transid); in btrfs_finish_ordered_io()
2734 ret = add_pending_csums(trans, &ordered_extent->list); in btrfs_finish_ordered_io()
2742 if (ret) { /* -ENOMEM or corruption */ in btrfs_finish_ordered_io()
2753 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, in btrfs_finish_ordered_io()
2782 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2783 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2790 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2791 ordered_extent->disk_num_bytes, in btrfs_finish_ordered_io()
2794 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2795 ordered_extent->disk_num_bytes, 1); in btrfs_finish_ordered_io()
2823 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_writepage_endio_finish_ordered()
2824 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_writepage_endio_finish_ordered()
2832 end - start + 1, uptodate)) in btrfs_writepage_endio_finish_ordered()
2836 wq = fs_info->endio_freespace_worker; in btrfs_writepage_endio_finish_ordered()
2838 wq = fs_info->endio_write_workers; in btrfs_writepage_endio_finish_ordered()
2840 btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); in btrfs_writepage_endio_finish_ordered()
2841 btrfs_queue_work(wq, &ordered_extent->work); in btrfs_writepage_endio_finish_ordered()
2848 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in check_data_csum()
2849 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in check_data_csum()
2851 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); in check_data_csum()
2855 csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size; in check_data_csum()
2858 shash->tfm = fs_info->csum_shash; in check_data_csum()
2869 io_bio->mirror_num); in check_data_csum()
2870 if (io_bio->device) in check_data_csum()
2871 btrfs_dev_stat_inc_and_print(io_bio->device, in check_data_csum()
2876 return -EIO; in check_data_csum()
2887 size_t offset = start - page_offset(page); in btrfs_verify_data_csum()
2888 struct inode *inode = page->mapping->host; in btrfs_verify_data_csum()
2889 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_verify_data_csum()
2890 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_verify_data_csum()
2897 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) in btrfs_verify_data_csum()
2900 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && in btrfs_verify_data_csum()
2906 phy_offset >>= inode->i_sb->s_blocksize_bits; in btrfs_verify_data_csum()
2908 (size_t)(end - start + 1)); in btrfs_verify_data_csum()
2912 * btrfs_add_delayed_iput - perform a delayed iput on @inode
2923 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_add_delayed_iput()
2926 if (atomic_add_unless(&inode->i_count, -1, 1)) in btrfs_add_delayed_iput()
2929 atomic_inc(&fs_info->nr_delayed_iputs); in btrfs_add_delayed_iput()
2930 spin_lock(&fs_info->delayed_iput_lock); in btrfs_add_delayed_iput()
2931 ASSERT(list_empty(&binode->delayed_iput)); in btrfs_add_delayed_iput()
2932 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); in btrfs_add_delayed_iput()
2933 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_add_delayed_iput()
2934 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) in btrfs_add_delayed_iput()
2935 wake_up_process(fs_info->cleaner_kthread); in btrfs_add_delayed_iput()
2941 list_del_init(&inode->delayed_iput); in run_delayed_iput_locked()
2942 spin_unlock(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
2943 iput(&inode->vfs_inode); in run_delayed_iput_locked()
2944 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) in run_delayed_iput_locked()
2945 wake_up(&fs_info->delayed_iputs_wait); in run_delayed_iput_locked()
2946 spin_lock(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
2952 if (!list_empty(&inode->delayed_iput)) { in btrfs_run_delayed_iput()
2953 spin_lock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
2954 if (!list_empty(&inode->delayed_iput)) in btrfs_run_delayed_iput()
2956 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
2963 spin_lock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
2964 while (!list_empty(&fs_info->delayed_iputs)) { in btrfs_run_delayed_iputs()
2967 inode = list_first_entry(&fs_info->delayed_iputs, in btrfs_run_delayed_iputs()
2971 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
2975 * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
2976 * @fs_info - the fs_info for this fs
2977 * @return - EINTR if we were killed, 0 if nothing's pending
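A hedged usage sketch (the flushing caller is an assumption; only the two helpers themselves appear in this listing): a path that needs the space pinned by pending final iputs can kick the queue and then wait for it to drain:

	btrfs_run_delayed_iputs(fs_info);
	ret = btrfs_wait_on_delayed_iputs(fs_info);
	if (ret < 0)
		return ret;	/* -EINTR: killed while waiting */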
2986 int ret = wait_event_killable(fs_info->delayed_iputs_wait, in btrfs_wait_on_delayed_iputs()
2987 atomic_read(&fs_info->nr_delayed_iputs) == 0); in btrfs_wait_on_delayed_iputs()
2989 return -EINTR; in btrfs_wait_on_delayed_iputs()
3002 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_add()
3003 if (ret && ret != -EEXIST) { in btrfs_orphan_add()
3018 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_del()
3027 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_orphan_cleanup()
3036 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) in btrfs_orphan_cleanup()
3041 ret = -ENOMEM; in btrfs_orphan_cleanup()
3044 path->reada = READA_BACK; in btrfs_orphan_cleanup()
3048 key.offset = (u64)-1; in btrfs_orphan_cleanup()
3062 if (path->slots[0] == 0) in btrfs_orphan_cleanup()
3064 path->slots[0]--; in btrfs_orphan_cleanup()
3068 leaf = path->nodes[0]; in btrfs_orphan_cleanup()
3069 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_orphan_cleanup()
3089 ret = -EINVAL; in btrfs_orphan_cleanup()
3098 inode = btrfs_iget(fs_info->sb, last_objectid, root); in btrfs_orphan_cleanup()
3100 if (ret && ret != -ENOENT) in btrfs_orphan_cleanup()
3103 if (ret == -ENOENT && root == fs_info->tree_root) { in btrfs_orphan_cleanup()
3119 spin_lock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3120 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, in btrfs_orphan_cleanup()
3122 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) in btrfs_orphan_cleanup()
3124 spin_unlock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3128 key.offset = found_key.objectid - 1; in btrfs_orphan_cleanup()
3144 * only if this filesystem was last used on a pre-v3.12 kernel in btrfs_orphan_cleanup()
3153 if (ret == -ENOENT || inode->i_nlink) { in btrfs_orphan_cleanup()
3179 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; in btrfs_orphan_cleanup()
3181 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { in btrfs_orphan_cleanup()
3221 *first_xattr_slot = -1; in acls_after_inode_item()
3231 if (*first_xattr_slot == -1) in acls_after_inode_item()
3261 if (*first_xattr_slot == -1) in acls_after_inode_item()
3267 * read an inode from the btree into the in-memory inode
3272 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_read_locked_inode()
3276 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_read_locked_inode()
3292 return -ENOMEM; in btrfs_read_locked_inode()
3295 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); in btrfs_read_locked_inode()
3304 leaf = path->nodes[0]; in btrfs_read_locked_inode()
3309 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_read_locked_inode()
3311 inode->i_mode = btrfs_inode_mode(leaf, inode_item); in btrfs_read_locked_inode()
3317 round_up(i_size_read(inode), fs_info->sectorsize)); in btrfs_read_locked_inode()
3319 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3320 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3322 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3323 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3325 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); in btrfs_read_locked_inode()
3326 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); in btrfs_read_locked_inode()
3328 BTRFS_I(inode)->i_otime.tv_sec = in btrfs_read_locked_inode()
3329 btrfs_timespec_sec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3330 BTRFS_I(inode)->i_otime.tv_nsec = in btrfs_read_locked_inode()
3331 btrfs_timespec_nsec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3334 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); in btrfs_read_locked_inode()
3335 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); in btrfs_read_locked_inode()
3339 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_read_locked_inode()
3340 inode->i_rdev = 0; in btrfs_read_locked_inode()
3343 BTRFS_I(inode)->index_cnt = (u64)-1; in btrfs_read_locked_inode()
3344 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); in btrfs_read_locked_inode()
3349 * and then re-read we need to do a full sync since we don't have any in btrfs_read_locked_inode()
3353 * This is required for both inode re-read from disk and delayed inode in btrfs_read_locked_inode()
3356 if (BTRFS_I(inode)->last_trans == fs_info->generation) in btrfs_read_locked_inode()
3358 &BTRFS_I(inode)->runtime_flags); in btrfs_read_locked_inode()
3368 * touch mydir/foo in btrfs_read_locked_inode()
3369 * ln mydir/foo mydir/bar in btrfs_read_locked_inode()
3373 * xfs_io -c fsync mydir/foo in btrfs_read_locked_inode()
3377 * We must make sure that when we fsync our inode foo we also log its in btrfs_read_locked_inode()
3379 * dentry with the "bar" name but our inode foo has a link count of 1 in btrfs_read_locked_inode()
3387 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3395 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3397 path->slots[0]++; in btrfs_read_locked_inode()
3398 if (inode->i_nlink != 1 || in btrfs_read_locked_inode()
3399 path->slots[0] >= btrfs_header_nritems(leaf)) in btrfs_read_locked_inode()
3402 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); in btrfs_read_locked_inode()
3406 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_read_locked_inode()
3411 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); in btrfs_read_locked_inode()
3416 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, in btrfs_read_locked_inode()
3424 maybe_acls = acls_after_inode_item(leaf, path->slots[0], in btrfs_read_locked_inode()
3426 if (first_xattr_slot != -1) { in btrfs_read_locked_inode()
3427 path->slots[0] = first_xattr_slot; in btrfs_read_locked_inode()
3433 root->root_key.objectid, ret); in btrfs_read_locked_inode()
3441 switch (inode->i_mode & S_IFMT) { in btrfs_read_locked_inode()
3443 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3444 inode->i_fop = &btrfs_file_operations; in btrfs_read_locked_inode()
3445 inode->i_op = &btrfs_file_inode_operations; in btrfs_read_locked_inode()
3448 inode->i_fop = &btrfs_dir_file_operations; in btrfs_read_locked_inode()
3449 inode->i_op = &btrfs_dir_inode_operations; in btrfs_read_locked_inode()
3452 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_read_locked_inode()
3454 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3457 inode->i_op = &btrfs_special_inode_operations; in btrfs_read_locked_inode()
3458 init_special_inode(inode, inode->i_mode, rdev); in btrfs_read_locked_inode()
3480 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); in fill_inode_item()
3481 btrfs_set_token_inode_mode(&token, item, inode->i_mode); in fill_inode_item()
3482 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); in fill_inode_item()
3484 btrfs_set_token_timespec_sec(&token, &item->atime, in fill_inode_item()
3485 inode->i_atime.tv_sec); in fill_inode_item()
3486 btrfs_set_token_timespec_nsec(&token, &item->atime, in fill_inode_item()
3487 inode->i_atime.tv_nsec); in fill_inode_item()
3489 btrfs_set_token_timespec_sec(&token, &item->mtime, in fill_inode_item()
3490 inode->i_mtime.tv_sec); in fill_inode_item()
3491 btrfs_set_token_timespec_nsec(&token, &item->mtime, in fill_inode_item()
3492 inode->i_mtime.tv_nsec); in fill_inode_item()
3494 btrfs_set_token_timespec_sec(&token, &item->ctime, in fill_inode_item()
3495 inode->i_ctime.tv_sec); in fill_inode_item()
3496 btrfs_set_token_timespec_nsec(&token, &item->ctime, in fill_inode_item()
3497 inode->i_ctime.tv_nsec); in fill_inode_item()
3499 btrfs_set_token_timespec_sec(&token, &item->otime, in fill_inode_item()
3500 BTRFS_I(inode)->i_otime.tv_sec); in fill_inode_item()
3501 btrfs_set_token_timespec_nsec(&token, &item->otime, in fill_inode_item()
3502 BTRFS_I(inode)->i_otime.tv_nsec); in fill_inode_item()
3506 BTRFS_I(inode)->generation); in fill_inode_item()
3508 btrfs_set_token_inode_transid(&token, item, trans->transid); in fill_inode_item()
3509 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); in fill_inode_item()
3510 btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); in fill_inode_item()
3515 * copy everything in the in-memory inode into the btree.
3527 return -ENOMEM; in btrfs_update_inode_item()
3529 path->leave_spinning = 1; in btrfs_update_inode_item()
3530 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, in btrfs_update_inode_item()
3534 ret = -ENOENT; in btrfs_update_inode_item()
3538 leaf = path->nodes[0]; in btrfs_update_inode_item()
3539 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_update_inode_item()
3552 * copy everything in the in-memory inode into the btree.
3557 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_inode()
3568 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID in btrfs_update_inode()
3569 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { in btrfs_update_inode()
3588 if (ret == -ENOSPC) in btrfs_update_inode_fallback()
3604 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_unlink_inode()
3614 ret = -ENOMEM; in __btrfs_unlink_inode()
3618 path->leave_spinning = 1; in __btrfs_unlink_inode()
3620 name, name_len, -1); in __btrfs_unlink_inode()
3622 ret = di ? PTR_ERR(di) : -ENOENT; in __btrfs_unlink_inode()
3640 if (inode->dir_index) { in __btrfs_unlink_inode()
3643 index = inode->dir_index; in __btrfs_unlink_inode()
3666 if (ret != 0 && ret != -ENOENT) { in __btrfs_unlink_inode()
3673 if (ret == -ENOENT) in __btrfs_unlink_inode()
3680 * being run in btrfs-cleaner context. If we have enough of these built in __btrfs_unlink_inode()
3681 * up we can end up burning a lot of time in btrfs-cleaner without any in __btrfs_unlink_inode()
3693 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); in __btrfs_unlink_inode()
3694 inode_inc_iversion(&inode->vfs_inode); in __btrfs_unlink_inode()
3695 inode_inc_iversion(&dir->vfs_inode); in __btrfs_unlink_inode()
3696 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = in __btrfs_unlink_inode()
3697 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); in __btrfs_unlink_inode()
3698 ret = btrfs_update_inode(trans, root, &dir->vfs_inode); in __btrfs_unlink_inode()
3711 drop_nlink(&inode->vfs_inode); in btrfs_unlink_inode()
3712 ret = btrfs_update_inode(trans, root, &inode->vfs_inode); in btrfs_unlink_inode()
3727 struct btrfs_root *root = BTRFS_I(dir)->root; in __unlink_start_trans()
3741 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_unlink()
3754 BTRFS_I(d_inode(dentry)), dentry->d_name.name, in btrfs_unlink()
3755 dentry->d_name.len); in btrfs_unlink()
3759 if (inode->i_nlink == 0) { in btrfs_unlink()
3767 btrfs_btree_balance_dirty(root->fs_info); in btrfs_unlink()
3774 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_unlink_subvol()
3780 const char *name = dentry->d_name.name; in btrfs_unlink_subvol()
3781 int name_len = dentry->d_name.len; in btrfs_unlink_subvol()
3788 objectid = inode->root->root_key.objectid; in btrfs_unlink_subvol()
3790 objectid = inode->location.objectid; in btrfs_unlink_subvol()
3793 return -EINVAL; in btrfs_unlink_subvol()
3798 return -ENOMEM; in btrfs_unlink_subvol()
3801 name, name_len, -1); in btrfs_unlink_subvol()
3803 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_unlink_subvol()
3807 leaf = path->nodes[0]; in btrfs_unlink_subvol()
3821 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. in btrfs_unlink_subvol()
3831 ret = -ENOENT; in btrfs_unlink_subvol()
3838 leaf = path->nodes[0]; in btrfs_unlink_subvol()
3839 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_unlink_subvol()
3844 root->root_key.objectid, dir_ino, in btrfs_unlink_subvol()
3858 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); in btrfs_unlink_subvol()
3860 dir->i_mtime = dir->i_ctime = current_time(dir); in btrfs_unlink_subvol()
3875 struct btrfs_fs_info *fs_info = root->fs_info; in may_destroy_subvol()
3884 return -ENOMEM; in may_destroy_subvol()
3887 dir_id = btrfs_super_root_dir(fs_info->super_copy); in may_destroy_subvol()
3888 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, in may_destroy_subvol()
3891 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); in may_destroy_subvol()
3892 if (key.objectid == root->root_key.objectid) { in may_destroy_subvol()
3893 ret = -EPERM; in may_destroy_subvol()
3902 key.objectid = root->root_key.objectid; in may_destroy_subvol()
3904 key.offset = (u64)-1; in may_destroy_subvol()
3906 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in may_destroy_subvol()
3912 if (path->slots[0] > 0) { in may_destroy_subvol()
3913 path->slots[0]--; in may_destroy_subvol()
3914 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in may_destroy_subvol()
3915 if (key.objectid == root->root_key.objectid && in may_destroy_subvol()
3917 ret = -ENOTEMPTY; in may_destroy_subvol()
3927 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_prune_dentries()
3934 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_prune_dentries()
3935 WARN_ON(btrfs_root_refs(&root->root_item) != 0); in btrfs_prune_dentries()
3937 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
3939 node = root->inode_tree.rb_node; in btrfs_prune_dentries()
3946 node = node->rb_left; in btrfs_prune_dentries()
3948 node = node->rb_right; in btrfs_prune_dentries()
3965 inode = igrab(&entry->vfs_inode); in btrfs_prune_dentries()
3967 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
3968 if (atomic_read(&inode->i_count) > 1) in btrfs_prune_dentries()
3976 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
3980 if (cond_resched_lock(&root->inode_lock)) in btrfs_prune_dentries()
3985 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
3990 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); in btrfs_delete_subvolume()
3991 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_delete_subvolume()
3993 struct btrfs_root *dest = BTRFS_I(inode)->root; in btrfs_delete_subvolume()
4005 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4006 if (dest->send_in_progress) { in btrfs_delete_subvolume()
4007 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4010 dest->root_key.objectid); in btrfs_delete_subvolume()
4011 return -EPERM; in btrfs_delete_subvolume()
4013 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4014 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4016 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4018 down_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4039 trans->block_rsv = &block_rsv; in btrfs_delete_subvolume()
4040 trans->bytes_reserved = block_rsv.size; in btrfs_delete_subvolume()
4053 memset(&dest->root_item.drop_progress, 0, in btrfs_delete_subvolume()
4054 sizeof(dest->root_item.drop_progress)); in btrfs_delete_subvolume()
4055 dest->root_item.drop_level = 0; in btrfs_delete_subvolume()
4056 btrfs_set_root_refs(&dest->root_item, 0); in btrfs_delete_subvolume()
4058 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { in btrfs_delete_subvolume()
4060 fs_info->tree_root, in btrfs_delete_subvolume()
4061 dest->root_key.objectid); in btrfs_delete_subvolume()
4069 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, in btrfs_delete_subvolume()
4071 dest->root_key.objectid); in btrfs_delete_subvolume()
4072 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4077 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { in btrfs_delete_subvolume()
4079 dest->root_item.received_uuid, in btrfs_delete_subvolume()
4081 dest->root_key.objectid); in btrfs_delete_subvolume()
4082 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4089 free_anon_bdev(dest->anon_dev); in btrfs_delete_subvolume()
4090 dest->anon_dev = 0; in btrfs_delete_subvolume()
4092 trans->block_rsv = NULL; in btrfs_delete_subvolume()
4093 trans->bytes_reserved = 0; in btrfs_delete_subvolume()
4097 inode->i_flags |= S_DEAD; in btrfs_delete_subvolume()
4101 up_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4103 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4104 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4105 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4107 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4111 ASSERT(dest->send_in_progress == 0); in btrfs_delete_subvolume()
4114 if (dest->ino_cache_inode) { in btrfs_delete_subvolume()
4115 iput(dest->ino_cache_inode); in btrfs_delete_subvolume()
4116 dest->ino_cache_inode = NULL; in btrfs_delete_subvolume()
4127 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_rmdir()
4131 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rmdir()
4132 return -ENOTEMPTY; in btrfs_rmdir()
4149 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; in btrfs_rmdir()
4153 BTRFS_I(d_inode(dentry)), dentry->d_name.name, in btrfs_rmdir()
4154 dentry->d_name.len); in btrfs_rmdir()
4161 * 1) create dir foo in btrfs_rmdir()
4162 * 2) create snapshot under dir foo in btrfs_rmdir()
4164 * 4) rmdir foo in btrfs_rmdir()
4165 * 5) mkdir foo in btrfs_rmdir()
4166 * 6) fsync foo or some file inside foo in btrfs_rmdir()
4168 if (last_unlink_trans >= trans->transid) in btrfs_rmdir()
4169 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; in btrfs_rmdir()
4173 btrfs_btree_balance_dirty(root->fs_info); in btrfs_rmdir()
4200 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_truncate_inode_items()
4211 u32 found_type = (u8)-1; in btrfs_truncate_inode_items()
4216 int extent_type = -1; in btrfs_truncate_inode_items()
4222 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); in btrfs_truncate_inode_items()
4228 * For non-free space inodes and non-shareable roots, we want to back in btrfs_truncate_inode_items()
4233 test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_truncate_inode_items()
4238 return -ENOMEM; in btrfs_truncate_inode_items()
4239 path->reada = READA_BACK; in btrfs_truncate_inode_items()
4241 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4242 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, in btrfs_truncate_inode_items()
4251 fs_info->sectorsize), in btrfs_truncate_inode_items()
4252 (u64)-1, 0); in btrfs_truncate_inode_items()
4257 * we relog the inode, so if root != BTRFS_I(inode)->root, it means in btrfs_truncate_inode_items()
4261 if (min_type == 0 && root == BTRFS_I(inode)->root) in btrfs_truncate_inode_items()
4265 key.offset = (u64)-1; in btrfs_truncate_inode_items()
4266 key.type = (u8)-1; in btrfs_truncate_inode_items()
4270 * with a 16K leaf size and 128MB extents, you can actually queue in btrfs_truncate_inode_items()
4276 ret = -EAGAIN; in btrfs_truncate_inode_items()
4280 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in btrfs_truncate_inode_items()
4289 if (path->slots[0] == 0) in btrfs_truncate_inode_items()
4291 path->slots[0]--; in btrfs_truncate_inode_items()
4298 leaf = path->nodes[0]; in btrfs_truncate_inode_items()
4299 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_truncate_inode_items()
4310 fi = btrfs_item_ptr(leaf, path->slots[0], in btrfs_truncate_inode_items()
4325 BTRFS_I(inode), leaf, fi, path->slots[0], in btrfs_truncate_inode_items()
4328 item_end--; in btrfs_truncate_inode_items()
4353 extent_num_bytes = ALIGN(new_size - in btrfs_truncate_inode_items()
4355 fs_info->sectorsize); in btrfs_truncate_inode_items()
4356 clear_start = ALIGN(new_size, fs_info->sectorsize); in btrfs_truncate_inode_items()
4359 num_dec = (orig_num_bytes - in btrfs_truncate_inode_items()
4362 &root->state) && in btrfs_truncate_inode_items()
4370 extent_offset = found_key.offset - in btrfs_truncate_inode_items()
4378 &root->state)) in btrfs_truncate_inode_items()
4392 u32 size = (u32)(new_size - found_key.offset); in btrfs_truncate_inode_items()
4410 clear_len = fs_info->sectorsize; in btrfs_truncate_inode_items()
4413 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_truncate_inode_items()
4414 inode_sub_bytes(inode, item_end + 1 - new_size); in btrfs_truncate_inode_items()
4422 if (root == BTRFS_I(inode)->root) { in btrfs_truncate_inode_items()
4438 pending_del_slot = path->slots[0]; in btrfs_truncate_inode_items()
4441 path->slots[0] + 1 == pending_del_slot) { in btrfs_truncate_inode_items()
4444 pending_del_slot = path->slots[0]; in btrfs_truncate_inode_items()
4454 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4461 ref.real_root = root->root_key.objectid; in btrfs_truncate_inode_items()
4478 if (path->slots[0] == 0 || in btrfs_truncate_inode_items()
4479 path->slots[0] != pending_del_slot || in btrfs_truncate_inode_items()
4507 ret = -EAGAIN; in btrfs_truncate_inode_items()
4513 path->slots[0]--; in btrfs_truncate_inode_items()
4527 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4532 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, in btrfs_truncate_inode_items()
4533 (u64)-1, &cached_state); in btrfs_truncate_inode_items()
4541 * btrfs_truncate_block - read, zero a chunk and write a block
4542 * @inode - inode that we're zeroing
4543 * @from - the offset to start zeroing
4544 * @len - the length to zero, 0 to zero the entire range respective to the
4546 * @front - zero up to the offset instead of from the offset on
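A usage sketch based on the parameters above; the first call is the same one that appears later in this listing, in btrfs_truncate(), to zero the tail of the final block:

	int ret;

	/* zero from i_size to the end of its block: len == 0 means "the rest
	 * of the block", front == 0 means "zero from @from onwards" */
	ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);

	/* front == 1 would instead zero the part of the block *before* @from */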
4554 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_truncate_block()
4555 struct address_space *mapping = inode->i_mapping; in btrfs_truncate_block()
4556 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_truncate_block()
4562 u32 blocksize = fs_info->sectorsize; in btrfs_truncate_block()
4564 unsigned offset = from & (blocksize - 1); in btrfs_truncate_block()
4577 block_end = block_start + blocksize - 1; in btrfs_truncate_block()
4603 ret = -ENOMEM; in btrfs_truncate_block()
4610 if (page->mapping != mapping) { in btrfs_truncate_block()
4616 ret = -EIO; in btrfs_truncate_block()
4636 clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, in btrfs_truncate_block()
4650 len = blocksize - offset; in btrfs_truncate_block()
4653 memset(kaddr + (block_start - page_offset(page)), in btrfs_truncate_block()
4656 memset(kaddr + (block_start - page_offset(page)) + offset, in btrfs_truncate_block()
4666 set_extent_bit(&BTRFS_I(inode)->io_tree, block_start, in btrfs_truncate_block()
4692 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in maybe_insert_hole()
4701 BTRFS_I(inode)->last_trans = fs_info->generation; in maybe_insert_hole()
4702 BTRFS_I(inode)->last_sub_trans = root->log_transid; in maybe_insert_hole()
4703 BTRFS_I(inode)->last_log_commit = root->last_log_commit; in maybe_insert_hole()
4708 * 1 - for the one we're dropping in maybe_insert_hole()
4709 * 1 - for the one we're adding in maybe_insert_hole()
4710 * 1 - for updating the inode. in maybe_insert_hole()
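The three items listed above for maybe_insert_hole() map directly onto the transaction reservation; a sketch of what that reservation looks like, assuming the usual btrfs_start_transaction() item-count convention:

	struct btrfs_trans_handle *trans;

	/* three metadata items: drop the old extent, insert the hole extent,
	 * update the inode item */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);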
4741 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_cont_expand()
4742 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_cont_expand()
4743 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_cont_expand()
4746 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in btrfs_cont_expand()
4747 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); in btrfs_cont_expand()
4748 u64 block_end = ALIGN(size, fs_info->sectorsize); in btrfs_cont_expand()
4767 block_end - 1, &cached_state); in btrfs_cont_expand()
4771 block_end - cur_offset); in btrfs_cont_expand()
4778 last_byte = ALIGN(last_byte, fs_info->sectorsize); in btrfs_cont_expand()
4779 hole_size = last_byte - cur_offset; in btrfs_cont_expand()
4781 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_cont_expand()
4795 cur_offset + hole_size - 1, 0); in btrfs_cont_expand()
4799 &BTRFS_I(inode)->runtime_flags); in btrfs_cont_expand()
4802 hole_em->start = cur_offset; in btrfs_cont_expand()
4803 hole_em->len = hole_size; in btrfs_cont_expand()
4804 hole_em->orig_start = cur_offset; in btrfs_cont_expand()
4806 hole_em->block_start = EXTENT_MAP_HOLE; in btrfs_cont_expand()
4807 hole_em->block_len = 0; in btrfs_cont_expand()
4808 hole_em->orig_block_len = 0; in btrfs_cont_expand()
4809 hole_em->ram_bytes = hole_size; in btrfs_cont_expand()
4810 hole_em->compress_type = BTRFS_COMPRESS_NONE; in btrfs_cont_expand()
4811 hole_em->generation = fs_info->generation; in btrfs_cont_expand()
4814 write_lock(&em_tree->lock); in btrfs_cont_expand()
4816 write_unlock(&em_tree->lock); in btrfs_cont_expand()
4817 if (err != -EEXIST) in btrfs_cont_expand()
4822 hole_size - 1, 0); in btrfs_cont_expand()
4839 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state); in btrfs_cont_expand()
4845 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setsize()
4848 loff_t newsize = attr->ia_size; in btrfs_setsize()
4849 int mask = attr->ia_valid; in btrfs_setsize()
4861 inode->i_ctime = inode->i_mtime = in btrfs_setsize()
4869 * state of this file - if the snapshot captures this expanding in btrfs_setsize()
4873 btrfs_drew_write_lock(&root->snapshot_lock); in btrfs_setsize()
4876 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4882 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4890 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4901 &BTRFS_I(inode)->runtime_flags); in btrfs_setsize()
4908 if (ret && inode->i_nlink) { in btrfs_setsize()
4912 * Truncate failed, so fix up the in-memory size. We in btrfs_setsize()
4915 * in-memory size to match. in btrfs_setsize()
4917 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_setsize()
4920 i_size_write(inode, BTRFS_I(inode)->disk_i_size); in btrfs_setsize()
4930 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setattr()
4934 return -EROFS; in btrfs_setattr()
4940 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in btrfs_setattr()
4946 if (attr->ia_valid) { in btrfs_setattr()
4951 if (!err && attr->ia_valid & ATTR_MODE) in btrfs_setattr()
4952 err = posix_acl_chmod(inode, inode->i_mode); in btrfs_setattr()
4972 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in evict_inode_truncate_pages()
4973 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; in evict_inode_truncate_pages()
4976 ASSERT(inode->i_state & I_FREEING); in evict_inode_truncate_pages()
4977 truncate_inode_pages_final(&inode->i_data); in evict_inode_truncate_pages()
4979 write_lock(&map_tree->lock); in evict_inode_truncate_pages()
4980 while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) { in evict_inode_truncate_pages()
4983 node = rb_first_cached(&map_tree->map); in evict_inode_truncate_pages()
4985 clear_bit(EXTENT_FLAG_PINNED, &em->flags); in evict_inode_truncate_pages()
4986 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); in evict_inode_truncate_pages()
4990 write_unlock(&map_tree->lock); in evict_inode_truncate_pages()
4992 write_lock(&map_tree->lock); in evict_inode_truncate_pages()
4995 write_unlock(&map_tree->lock); in evict_inode_truncate_pages()
5005 * queue kthread), inode references (inode->i_count) were not taken in evict_inode_truncate_pages()
5009 * reference count - if we don't do it, when they access the inode's in evict_inode_truncate_pages()
5011 * use-after-free issue. in evict_inode_truncate_pages()
5013 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5014 while (!RB_EMPTY_ROOT(&io_tree->state)) { in evict_inode_truncate_pages()
5021 node = rb_first(&io_tree->state); in evict_inode_truncate_pages()
5023 start = state->start; in evict_inode_truncate_pages()
5024 end = state->end; in evict_inode_truncate_pages()
5025 state_flags = state->state; in evict_inode_truncate_pages()
5026 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5040 end - start + 1); in evict_inode_truncate_pages()
5048 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5050 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5056 struct btrfs_fs_info *fs_info = root->fs_info; in evict_refill_and_join()
5057 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in evict_refill_and_join()
5075 ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra, in evict_refill_and_join()
5083 btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) { in evict_refill_and_join()
5086 return ERR_PTR(-ENOSPC); in evict_refill_and_join()
5096 trans->block_rsv = &fs_info->trans_block_rsv; in evict_refill_and_join()
5097 trans->bytes_reserved = delayed_refs_extra; in evict_refill_and_join()
5098 btrfs_block_rsv_migrate(rsv, trans->block_rsv, in evict_refill_and_join()
5106 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_evict_inode()
5108 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_evict_inode()
5121 if (inode->i_nlink && in btrfs_evict_inode()
5122 ((btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5123 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || in btrfs_evict_inode()
5130 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); in btrfs_evict_inode()
5132 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) in btrfs_evict_inode()
5135 if (inode->i_nlink > 0) { in btrfs_evict_inode()
5136 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5137 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); in btrfs_evict_inode()
5148 rsv->size = btrfs_calc_metadata_size(fs_info, 1); in btrfs_evict_inode()
5149 rsv->failfast = 1; in btrfs_evict_inode()
5158 trans->block_rsv = rsv; in btrfs_evict_inode()
5161 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5164 if (ret && ret != -ENOSPC && ret != -EAGAIN) in btrfs_evict_inode()
5181 trans->block_rsv = rsv; in btrfs_evict_inode()
5183 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5187 if (!(root == fs_info->tree_root || in btrfs_evict_inode()
5188 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) in btrfs_evict_inode()
5207 * If no dir entries were found, returns -ENOENT.
5208 * If found a corrupted location in dir entry, returns -EUCLEAN.
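A hedged caller-side sketch of the return convention documented above for btrfs_inode_by_name() (variable names here are illustrative):

	struct btrfs_key location;
	u8 di_type;
	int ret;

	ret = btrfs_inode_by_name(dir, dentry, &location, &di_type);
	if (ret == -ENOENT) {
		/* the name simply does not exist in @dir */
	} else if (ret < 0) {
		/* -EUCLEAN (corrupted dir item) or another hard error */
	} else {
		/* @location now names either an inode item or a subvolume root */
	}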
5213 const char *name = dentry->d_name.name; in btrfs_inode_by_name()
5214 int namelen = dentry->d_name.len; in btrfs_inode_by_name()
5217 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_inode_by_name()
5222 return -ENOMEM; in btrfs_inode_by_name()
5227 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_inode_by_name()
5231 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); in btrfs_inode_by_name()
5232 if (location->type != BTRFS_INODE_ITEM_KEY && in btrfs_inode_by_name()
5233 location->type != BTRFS_ROOT_ITEM_KEY) { in btrfs_inode_by_name()
5234 ret = -EUCLEAN; in btrfs_inode_by_name()
5235 btrfs_warn(root->fs_info, in btrfs_inode_by_name()
5238 location->objectid, location->type, location->offset); in btrfs_inode_by_name()
5241 *type = btrfs_dir_type(path->nodes[0], di); in btrfs_inode_by_name()
5268 err = -ENOMEM; in fixup_tree_root_location()
5272 err = -ENOENT; in fixup_tree_root_location()
5273 key.objectid = BTRFS_I(dir)->root->root_key.objectid; in fixup_tree_root_location()
5275 key.offset = location->objectid; in fixup_tree_root_location()
5277 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in fixup_tree_root_location()
5284 leaf = path->nodes[0]; in fixup_tree_root_location()
5285 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); in fixup_tree_root_location()
5287 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) in fixup_tree_root_location()
5290 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, in fixup_tree_root_location()
5292 dentry->d_name.len); in fixup_tree_root_location()
5298 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); in fixup_tree_root_location()
5305 location->objectid = btrfs_root_dirid(&new_root->root_item); in fixup_tree_root_location()
5306 location->type = BTRFS_INODE_ITEM_KEY; in fixup_tree_root_location()
5307 location->offset = 0; in fixup_tree_root_location()
5316 struct btrfs_root *root = BTRFS_I(inode)->root; in inode_tree_add()
5320 struct rb_node *new = &BTRFS_I(inode)->rb_node; in inode_tree_add()
5326 spin_lock(&root->inode_lock); in inode_tree_add()
5327 p = &root->inode_tree.rb_node; in inode_tree_add()
5333 p = &parent->rb_left; in inode_tree_add()
5335 p = &parent->rb_right; in inode_tree_add()
5337 WARN_ON(!(entry->vfs_inode.i_state & in inode_tree_add()
5339 rb_replace_node(parent, new, &root->inode_tree); in inode_tree_add()
5341 spin_unlock(&root->inode_lock); in inode_tree_add()
5346 rb_insert_color(new, &root->inode_tree); in inode_tree_add()
5347 spin_unlock(&root->inode_lock); in inode_tree_add()
5352 struct btrfs_root *root = inode->root; in inode_tree_del()
5355 spin_lock(&root->inode_lock); in inode_tree_del()
5356 if (!RB_EMPTY_NODE(&inode->rb_node)) { in inode_tree_del()
5357 rb_erase(&inode->rb_node, &root->inode_tree); in inode_tree_del()
5358 RB_CLEAR_NODE(&inode->rb_node); in inode_tree_del()
5359 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5361 spin_unlock(&root->inode_lock); in inode_tree_del()
5363 if (empty && btrfs_root_refs(&root->root_item) == 0) { in inode_tree_del()
5364 spin_lock(&root->inode_lock); in inode_tree_del()
5365 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5366 spin_unlock(&root->inode_lock); in inode_tree_del()
5377 inode->i_ino = args->ino; in btrfs_init_locked_inode()
5378 BTRFS_I(inode)->location.objectid = args->ino; in btrfs_init_locked_inode()
5379 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; in btrfs_init_locked_inode()
5380 BTRFS_I(inode)->location.offset = 0; in btrfs_init_locked_inode()
5381 BTRFS_I(inode)->root = btrfs_grab_root(args->root); in btrfs_init_locked_inode()
5382 BUG_ON(args->root && !BTRFS_I(inode)->root); in btrfs_init_locked_inode()
5390 return args->ino == BTRFS_I(inode)->location.objectid && in btrfs_find_actor()
5391 args->root == BTRFS_I(inode)->root; in btrfs_find_actor()
5423 return ERR_PTR(-ENOMEM); in btrfs_iget_path()
5425 if (inode->i_state & I_NEW) { in btrfs_iget_path()
5440 ret = -ENOENT; in btrfs_iget_path()
5460 return ERR_PTR(-ENOMEM); in new_simple_dir()
5462 BTRFS_I(inode)->root = btrfs_grab_root(root); in new_simple_dir()
5463 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); in new_simple_dir()
5464 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); in new_simple_dir()
5466 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; in new_simple_dir()
5468 * We only need lookup, the rest is read-only and there's no inode in new_simple_dir()
5471 inode->i_op = &simple_dir_inode_operations; in new_simple_dir()
5472 inode->i_opflags &= ~IOP_XATTR; in new_simple_dir()
5473 inode->i_fop = &simple_dir_operations; in new_simple_dir()
5474 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; in new_simple_dir()
5475 inode->i_mtime = current_time(inode); in new_simple_dir()
5476 inode->i_atime = inode->i_mtime; in new_simple_dir()
5477 inode->i_ctime = inode->i_mtime; in new_simple_dir()
5478 BTRFS_I(inode)->i_otime = inode->i_mtime; in new_simple_dir()
5486 * Compile-time asserts that generic FT_* types still match in btrfs_inode_type()
5498 return fs_umode_to_ftype(inode->i_mode); in btrfs_inode_type()
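The compile-time asserts referred to above are the usual BUILD_BUG_ON() pattern; a sketch of what they look like (the full set covers every file type value):

	BUILD_BUG_ON(BTRFS_FT_UNKNOWN != FT_UNKNOWN);
	BUILD_BUG_ON(BTRFS_FT_REG_FILE != FT_REG_FILE);
	BUILD_BUG_ON(BTRFS_FT_DIR != FT_DIR);
	BUILD_BUG_ON(BTRFS_FT_SYMLINK != FT_SYMLINK);
	/* ...and likewise for CHRDEV, BLKDEV, FIFO and SOCK */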
5503 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_lookup_dentry()
5505 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_lookup_dentry()
5511 if (dentry->d_name.len > BTRFS_NAME_LEN) in btrfs_lookup_dentry()
5512 return ERR_PTR(-ENAMETOOLONG); in btrfs_lookup_dentry()
5519 inode = btrfs_iget(dir->i_sb, location.objectid, root); in btrfs_lookup_dentry()
5527 inode->i_mode, btrfs_inode_type(inode), in btrfs_lookup_dentry()
5530 return ERR_PTR(-EUCLEAN); in btrfs_lookup_dentry()
5538 if (ret != -ENOENT) in btrfs_lookup_dentry()
5541 inode = new_simple_dir(dir->i_sb, &location, sub_root); in btrfs_lookup_dentry()
5543 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); in btrfs_lookup_dentry()
5549 down_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5550 if (!sb_rdonly(inode->i_sb)) in btrfs_lookup_dentry()
5552 up_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5568 inode = d_inode(dentry->d_parent); in btrfs_dentry_delete()
5571 root = BTRFS_I(inode)->root; in btrfs_dentry_delete()
5572 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_dentry_delete()
5586 if (inode == ERR_PTR(-ENOENT)) in btrfs_lookup()
5606 return -ENOMEM; in btrfs_opendir()
5607 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); in btrfs_opendir()
5608 if (!private->filldir_buf) { in btrfs_opendir()
5610 return -ENOMEM; in btrfs_opendir()
5612 file->private_data = private; in btrfs_opendir()
5625 while (entries--) { in btrfs_filldir()
5629 ctx->pos = get_unaligned(&entry->offset); in btrfs_filldir()
5630 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), in btrfs_filldir()
5631 get_unaligned(&entry->ino), in btrfs_filldir()
5632 get_unaligned(&entry->type))) in btrfs_filldir()
5635 get_unaligned(&entry->name_len); in btrfs_filldir()
5636 ctx->pos++; in btrfs_filldir()
5644 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_real_readdir()
5645 struct btrfs_file_private *private = file->private_data; in btrfs_real_readdir()
5668 return -ENOMEM; in btrfs_real_readdir()
5670 addr = private->filldir_buf; in btrfs_real_readdir()
5671 path->reada = READA_FORWARD; in btrfs_real_readdir()
5679 key.offset = ctx->pos; in btrfs_real_readdir()
5689 leaf = path->nodes[0]; in btrfs_real_readdir()
5690 slot = path->slots[0]; in btrfs_real_readdir()
5706 if (found_key.offset < ctx->pos) in btrfs_real_readdir()
5715 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5718 addr = private->filldir_buf; in btrfs_real_readdir()
5725 put_unaligned(name_len, &entry->name_len); in btrfs_real_readdir()
5730 &entry->type); in btrfs_real_readdir()
5732 put_unaligned(location.objectid, &entry->ino); in btrfs_real_readdir()
5733 put_unaligned(found_key.offset, &entry->offset); in btrfs_real_readdir()
5738 path->slots[0]++; in btrfs_real_readdir()
5742 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5758 * they're returned by readdir. Until we re-use freed offsets in btrfs_real_readdir()
5767 if (ctx->pos >= INT_MAX) in btrfs_real_readdir()
5768 ctx->pos = LLONG_MAX; in btrfs_real_readdir()
5770 ctx->pos = INT_MAX; in btrfs_real_readdir()
5788 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dirty_inode()
5789 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_dirty_inode()
5793 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) in btrfs_dirty_inode()
5801 if (ret && ret == -ENOSPC) { in btrfs_dirty_inode()
5811 if (BTRFS_I(inode)->delayed_node) in btrfs_dirty_inode()
5824 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_update_time()
5828 return -EROFS; in btrfs_update_time()
5833 inode->i_ctime = *now; in btrfs_update_time()
5835 inode->i_mtime = *now; in btrfs_update_time()
5837 inode->i_atime = *now; in btrfs_update_time()
5843 * and then set the in-memory index_cnt variable to reflect
5848 struct btrfs_root *root = inode->root; in btrfs_set_inode_index_count()
5856 key.offset = (u64)-1; in btrfs_set_inode_index_count()
5860 return -ENOMEM; in btrfs_set_inode_index_count()
5876 if (path->slots[0] == 0) { in btrfs_set_inode_index_count()
5877 inode->index_cnt = 2; in btrfs_set_inode_index_count()
5881 path->slots[0]--; in btrfs_set_inode_index_count()
5883 leaf = path->nodes[0]; in btrfs_set_inode_index_count()
5884 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_set_inode_index_count()
5888 inode->index_cnt = 2; in btrfs_set_inode_index_count()
5892 inode->index_cnt = found_key.offset + 1; in btrfs_set_inode_index_count()
5906 if (dir->index_cnt == (u64)-1) { in btrfs_set_inode_index()
5915 *index = dir->index_cnt; in btrfs_set_inode_index()
5916 dir->index_cnt++; in btrfs_set_inode_index()
5925 args.ino = BTRFS_I(inode)->location.objectid; in btrfs_insert_inode_locked()
5926 args.root = BTRFS_I(inode)->root; in btrfs_insert_inode_locked()
5929 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), in btrfs_insert_inode_locked()
5945 flags = BTRFS_I(dir)->flags; in btrfs_inherit_iflags()
5948 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
5949 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
5951 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
5952 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
5956 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; in btrfs_inherit_iflags()
5957 if (S_ISREG(inode->i_mode)) in btrfs_inherit_iflags()
5958 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_inherit_iflags()
5971 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_inode()
5986 return ERR_PTR(-ENOMEM); in btrfs_new_inode()
5989 inode = new_inode(fs_info->sb); in btrfs_new_inode()
5993 return ERR_PTR(-ENOMEM); in btrfs_new_inode()
6007 inode->i_ino = objectid; in btrfs_new_inode()
6026 BTRFS_I(inode)->index_cnt = 2; in btrfs_new_inode()
6027 BTRFS_I(inode)->dir_index = *index; in btrfs_new_inode()
6028 BTRFS_I(inode)->root = btrfs_grab_root(root); in btrfs_new_inode()
6029 BTRFS_I(inode)->generation = trans->transid; in btrfs_new_inode()
6030 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_new_inode()
6038 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); in btrfs_new_inode()
6060 location = &BTRFS_I(inode)->location; in btrfs_new_inode()
6061 location->objectid = objectid; in btrfs_new_inode()
6062 location->offset = 0; in btrfs_new_inode()
6063 location->type = BTRFS_INODE_ITEM_KEY; in btrfs_new_inode()
6071 path->leave_spinning = 1; in btrfs_new_inode()
6079 inode->i_mtime = current_time(inode); in btrfs_new_inode()
6080 inode->i_atime = inode->i_mtime; in btrfs_new_inode()
6081 inode->i_ctime = inode->i_mtime; in btrfs_new_inode()
6082 BTRFS_I(inode)->i_otime = inode->i_mtime; in btrfs_new_inode()
6084 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_new_inode()
6086 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, in btrfs_new_inode()
6088 fill_inode_item(trans, path->nodes[0], inode_item, inode); in btrfs_new_inode()
6091 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, in btrfs_new_inode()
6093 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); in btrfs_new_inode()
6094 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); in btrfs_new_inode()
6096 write_extent_buffer(path->nodes[0], name, ptr, name_len); in btrfs_new_inode()
6099 btrfs_mark_buffer_dirty(path->nodes[0]); in btrfs_new_inode()
6106 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_new_inode()
6108 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | in btrfs_new_inode()
6123 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); in btrfs_new_inode()
6131 BTRFS_I(dir)->index_cnt--; in btrfs_new_inode()
6148 struct btrfs_root *root = parent_inode->root; in btrfs_add_link()
6153 memcpy(&key, &inode->root->root_key, sizeof(key)); in btrfs_add_link()
6162 root->root_key.objectid, parent_ino, in btrfs_add_link()
6174 btrfs_inode_type(&inode->vfs_inode), index); in btrfs_add_link()
6175 if (ret == -EEXIST || ret == -EOVERFLOW) in btrfs_add_link()
6182 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + in btrfs_add_link()
6184 inode_inc_iversion(&parent_inode->vfs_inode); in btrfs_add_link()
6191 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { in btrfs_add_link()
6192 struct timespec64 now = current_time(&parent_inode->vfs_inode); in btrfs_add_link()
6194 parent_inode->vfs_inode.i_mtime = now; in btrfs_add_link()
6195 parent_inode->vfs_inode.i_ctime = now; in btrfs_add_link()
6197 ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); in btrfs_add_link()
6207 root->root_key.objectid, parent_ino, in btrfs_add_link()
6230 dentry->d_name.name, dentry->d_name.len, in btrfs_add_nondir()
6233 err = -EEXIST; in btrfs_add_nondir()
6240 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_mknod()
6242 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_mknod()
6261 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_mknod()
6262 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_mknod()
6276 inode->i_op = &btrfs_special_inode_operations; in btrfs_mknod()
6277 init_special_inode(inode, inode->i_mode, rdev); in btrfs_mknod()
6279 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_mknod()
6304 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create()
6306 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_create()
6325 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_create()
6326 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_create()
6339 inode->i_fop = &btrfs_file_operations; in btrfs_create()
6340 inode->i_op = &btrfs_file_inode_operations; in btrfs_create()
6341 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_create()
6343 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_create()
6372 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_link()
6374 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_link()
6380 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) in btrfs_link()
6381 return -EXDEV; in btrfs_link()
6383 if (inode->i_nlink >= BTRFS_LINK_MAX) in btrfs_link()
6384 return -EMLINK; in btrfs_link()
6396 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); in btrfs_link()
6404 BTRFS_I(inode)->dir_index = 0ULL; in btrfs_link()
6407 inode->i_ctime = current_time(inode); in btrfs_link()
6409 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); in btrfs_link()
6417 struct dentry *parent = dentry->d_parent; in btrfs_link()
6422 if (inode->i_nlink == 1) { in btrfs_link()
6448 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_mkdir()
6451 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_mkdir()
6469 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_mkdir()
6470 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_mkdir()
6479 inode->i_op = &btrfs_dir_inode_operations; in btrfs_mkdir()
6480 inode->i_fop = &btrfs_dir_file_operations; in btrfs_mkdir()
6482 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_mkdir()
6492 dentry->d_name.name, in btrfs_mkdir()
6493 dentry->d_name.len, 0, index); in btrfs_mkdir()
6515 struct extent_buffer *leaf = path->nodes[0]; in uncompress_inline()
6526 btrfs_item_nr(path->slots[0])); in uncompress_inline()
6529 return -ENOMEM; in uncompress_inline()
6548 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); in uncompress_inline()
6556 * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6564 * range, reading it from the B-tree and caching it if necessary. Note that
6571 * Return: ERR_PTR on error, non-NULL extent_map on success.
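A hedged sketch of the calling convention documented above (no page is being filled here, so page/pg_offset are NULL/0; the caller owns the returned map and drops it with free_extent_map()):

	struct extent_map *em;

	em = btrfs_get_extent(inode, NULL, 0, start, len);
	if (IS_ERR(em))
		return PTR_ERR(em);

	if (em->block_start == EXTENT_MAP_HOLE) {
		/* the range starts in a hole */
	}

	free_extent_map(em);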
6577 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_get_extent()
6582 int extent_type = -1; in btrfs_get_extent()
6584 struct btrfs_root *root = inode->root; in btrfs_get_extent()
6589 struct extent_map_tree *em_tree = &inode->extent_tree; in btrfs_get_extent()
6590 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_get_extent()
6592 read_lock(&em_tree->lock); in btrfs_get_extent()
6594 read_unlock(&em_tree->lock); in btrfs_get_extent()
6597 if (em->start > start || em->start + em->len <= start) in btrfs_get_extent()
6599 else if (em->block_start == EXTENT_MAP_INLINE && page) in btrfs_get_extent()
6606 ret = -ENOMEM; in btrfs_get_extent()
6609 em->start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6610 em->orig_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6611 em->len = (u64)-1; in btrfs_get_extent()
6612 em->block_len = (u64)-1; in btrfs_get_extent()
6616 ret = -ENOMEM; in btrfs_get_extent()
6621 path->reada = READA_FORWARD; in btrfs_get_extent()
6627 path->leave_spinning = 1; in btrfs_get_extent()
6629 path->recurse = btrfs_is_free_space_inode(inode); in btrfs_get_extent()
6635 if (path->slots[0] == 0) in btrfs_get_extent()
6637 path->slots[0]--; in btrfs_get_extent()
6641 leaf = path->nodes[0]; in btrfs_get_extent()
6642 item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_get_extent()
6644 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6663 if (!S_ISREG(inode->vfs_inode.i_mode)) { in btrfs_get_extent()
6664 ret = -EUCLEAN; in btrfs_get_extent()
6666 "regular/prealloc extent found for non-regular inode %llu", in btrfs_get_extent()
6674 path->slots[0], in btrfs_get_extent()
6679 path->slots[0]++; in btrfs_get_extent()
6680 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in btrfs_get_extent()
6687 leaf = path->nodes[0]; in btrfs_get_extent()
6689 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6699 em->start = start; in btrfs_get_extent()
6700 em->orig_start = start; in btrfs_get_extent()
6701 em->len = found_key.offset - start; in btrfs_get_extent()
6702 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6722 extent_offset = page_offset(page) + pg_offset - extent_start; in btrfs_get_extent()
6723 copy_size = min_t(u64, PAGE_SIZE - pg_offset, in btrfs_get_extent()
6724 size - extent_offset); in btrfs_get_extent()
6725 em->start = extent_start + extent_offset; in btrfs_get_extent()
6726 em->len = ALIGN(copy_size, fs_info->sectorsize); in btrfs_get_extent()
6727 em->orig_block_len = em->len; in btrfs_get_extent()
6728 em->orig_start = em->start; in btrfs_get_extent()
6745 PAGE_SIZE - pg_offset - in btrfs_get_extent()
6752 set_extent_uptodate(io_tree, em->start, in btrfs_get_extent()
6753 extent_map_end(em) - 1, NULL, GFP_NOFS); in btrfs_get_extent()
6757 em->start = start; in btrfs_get_extent()
6758 em->orig_start = start; in btrfs_get_extent()
6759 em->len = len; in btrfs_get_extent()
6760 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6764 if (em->start > start || extent_map_end(em) <= start) { in btrfs_get_extent()
6767 em->start, em->len, start, len); in btrfs_get_extent()
6768 ret = -EIO; in btrfs_get_extent()
6772 write_lock(&em_tree->lock); in btrfs_get_extent()
6774 write_unlock(&em_tree->lock); in btrfs_get_extent()
6803 * - a hole or in btrfs_get_extent_fiemap()
6804 * - a pre-alloc extent, in btrfs_get_extent_fiemap()
6807 if (em->block_start != EXTENT_MAP_HOLE && in btrfs_get_extent_fiemap()
6808 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_extent_fiemap()
6813 /* check to see if we've wrapped (len == -1 or similar) */ in btrfs_get_extent_fiemap()
6816 end = (u64)-1; in btrfs_get_extent_fiemap()
6818 end -= 1; in btrfs_get_extent_fiemap()
6823 delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start, in btrfs_get_extent_fiemap()
6827 delalloc_end = (u64)-1; in btrfs_get_extent_fiemap()
6844 delalloc_len = delalloc_end - delalloc_start; in btrfs_get_extent_fiemap()
6853 err = -ENOMEM; in btrfs_get_extent_fiemap()
6865 if (hole_end <= start || hole_em->start > end) { in btrfs_get_extent_fiemap()
6869 hole_start = max(hole_em->start, start); in btrfs_get_extent_fiemap()
6870 hole_len = hole_end - hole_start; in btrfs_get_extent_fiemap()
6879 em->len = min(hole_len, delalloc_start - hole_start); in btrfs_get_extent_fiemap()
6880 em->start = hole_start; in btrfs_get_extent_fiemap()
6881 em->orig_start = hole_start; in btrfs_get_extent_fiemap()
6886 em->block_start = hole_em->block_start; in btrfs_get_extent_fiemap()
6887 em->block_len = hole_len; in btrfs_get_extent_fiemap()
6888 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) in btrfs_get_extent_fiemap()
6889 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in btrfs_get_extent_fiemap()
6895 em->start = delalloc_start; in btrfs_get_extent_fiemap()
6896 em->len = delalloc_len; in btrfs_get_extent_fiemap()
6897 em->orig_start = delalloc_start; in btrfs_get_extent_fiemap()
6898 em->block_start = EXTENT_MAP_DELALLOC; in btrfs_get_extent_fiemap()
6899 em->block_len = delalloc_len; in btrfs_get_extent_fiemap()
6940 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); in btrfs_create_dio_extent()
6952 struct btrfs_root *root = inode->root; in btrfs_new_extent_direct()
6953 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_extent_direct()
6960 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, in btrfs_new_extent_direct()
6983 * @orig_len: (optional) Return the original on-disk length of the file extent
7003 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in can_nocow_extent()
7007 struct btrfs_root *root = BTRFS_I(inode)->root; in can_nocow_extent()
7008 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in can_nocow_extent()
7017 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); in can_nocow_extent()
7021 return -ENOMEM; in can_nocow_extent()
7028 slot = path->slots[0]; in can_nocow_extent()
7035 slot--; in can_nocow_extent()
7038 leaf = path->nodes[0]; in can_nocow_extent()
7081 btrfs_root_last_snapshot(&root->root_item))) in can_nocow_extent()
7087 *orig_start = key.offset - backref_offset; in can_nocow_extent()
7095 num_bytes = min(offset + *len, extent_end) - offset; in can_nocow_extent()
7100 root->fs_info->sectorsize) - 1; in can_nocow_extent()
7104 ret = -EAGAIN; in can_nocow_extent()
7117 key.offset - backref_offset, disk_bytenr, in can_nocow_extent()
7131 disk_bytenr += offset - key.offset; in can_nocow_extent()
7152 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, in lock_extent_direct()
7160 lockend - lockstart + 1); in lock_extent_direct()
7170 (!writing || !filemap_range_has_page(inode->i_mapping, in lock_extent_direct()
7174 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, in lock_extent_direct()
7194 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) in lock_extent_direct()
7197 ret = -ENOTBLK; in lock_extent_direct()
7213 ret = -ENOTBLK; in lock_extent_direct()
7241 em_tree = &inode->extent_tree; in create_io_em()
7244 return ERR_PTR(-ENOMEM); in create_io_em()
7246 em->start = start; in create_io_em()
7247 em->orig_start = orig_start; in create_io_em()
7248 em->len = len; in create_io_em()
7249 em->block_len = block_len; in create_io_em()
7250 em->block_start = block_start; in create_io_em()
7251 em->orig_block_len = orig_block_len; in create_io_em()
7252 em->ram_bytes = ram_bytes; in create_io_em()
7253 em->generation = -1; in create_io_em()
7254 set_bit(EXTENT_FLAG_PINNED, &em->flags); in create_io_em()
7256 set_bit(EXTENT_FLAG_FILLING, &em->flags); in create_io_em()
7258 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); in create_io_em()
7259 em->compress_type = compress_type; in create_io_em()
7263 btrfs_drop_extent_cache(inode, em->start, in create_io_em()
7264 em->start + em->len - 1, 0); in create_io_em()
7265 write_lock(&em_tree->lock); in create_io_em()
7267 write_unlock(&em_tree->lock); in create_io_em()
7272 } while (ret == -EEXIST); in create_io_em()
7289 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_get_blocks_direct_write()
7302 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || in btrfs_get_blocks_direct_write()
7303 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in btrfs_get_blocks_direct_write()
7304 em->block_start != EXTENT_MAP_HOLE)) { in btrfs_get_blocks_direct_write()
7308 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_blocks_direct_write()
7312 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7313 block_start = em->block_start + (start - em->start); in btrfs_get_blocks_direct_write()
7352 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7362 dio_data->reserve -= len; in btrfs_get_blocks_direct_write()
7371 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dio_iomap_begin()
7380 bool sync = (current->journal_info == BTRFS_DIO_SYNC_STUB); in btrfs_dio_iomap_begin()
7383 * We used current->journal_info here to see if we were sync, but in btrfs_dio_iomap_begin()
7385 * we have a journal_info set, so we need to clear this out and re-set in btrfs_dio_iomap_begin()
7388 ASSERT(current->journal_info == NULL || in btrfs_dio_iomap_begin()
7389 current->journal_info == BTRFS_DIO_SYNC_STUB); in btrfs_dio_iomap_begin()
7390 current->journal_info = NULL; in btrfs_dio_iomap_begin()
7393 len = min_t(u64, len, fs_info->sectorsize); in btrfs_dio_iomap_begin()
7396 lockend = start + len - 1; in btrfs_dio_iomap_begin()
7405 &BTRFS_I(inode)->runtime_flags)) { in btrfs_dio_iomap_begin()
7406 ret = filemap_fdatawrite_range(inode->i_mapping, start, in btrfs_dio_iomap_begin()
7407 start + length - 1); in btrfs_dio_iomap_begin()
7414 return -ENOMEM; in btrfs_dio_iomap_begin()
7416 dio_data->sync = sync; in btrfs_dio_iomap_begin()
7417 dio_data->length = length; in btrfs_dio_iomap_begin()
7419 dio_data->reserve = round_up(length, fs_info->sectorsize); in btrfs_dio_iomap_begin()
7421 &dio_data->data_reserved, in btrfs_dio_iomap_begin()
7422 start, dio_data->reserve); in btrfs_dio_iomap_begin()
7424 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7429 iomap->private = dio_data; in btrfs_dio_iomap_begin()
7437 ret = -ENOTBLK; in btrfs_dio_iomap_begin()
7457 * We return -ENOTBLK because that's what makes DIO go ahead and go back in btrfs_dio_iomap_begin()
7461 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || in btrfs_dio_iomap_begin()
7462 em->block_start == EXTENT_MAP_INLINE) { in btrfs_dio_iomap_begin()
7464 ret = -ENOTBLK; in btrfs_dio_iomap_begin()
7468 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7476 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7488 unlock_extent_cached(&BTRFS_I(inode)->io_tree, in btrfs_dio_iomap_begin()
7498 if ((em->block_start == EXTENT_MAP_HOLE) || in btrfs_dio_iomap_begin()
7499 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { in btrfs_dio_iomap_begin()
7500 iomap->addr = IOMAP_NULL_ADDR; in btrfs_dio_iomap_begin()
7501 iomap->type = IOMAP_HOLE; in btrfs_dio_iomap_begin()
7503 iomap->addr = em->block_start + (start - em->start); in btrfs_dio_iomap_begin()
7504 iomap->type = IOMAP_MAPPED; in btrfs_dio_iomap_begin()
7506 iomap->offset = start; in btrfs_dio_iomap_begin()
7507 iomap->bdev = fs_info->fs_devices->latest_bdev; in btrfs_dio_iomap_begin()
7508 iomap->length = len; in btrfs_dio_iomap_begin()
7515 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7520 dio_data->data_reserved, start, in btrfs_dio_iomap_begin()
7521 dio_data->reserve, true); in btrfs_dio_iomap_begin()
7522 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve); in btrfs_dio_iomap_begin()
7523 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7533 struct btrfs_dio_data *dio_data = iomap->private; in btrfs_dio_iomap_end()
7534 size_t submitted = dio_data->submitted; in btrfs_dio_iomap_end()
7537 if (!write && (iomap->type == IOMAP_HOLE)) { in btrfs_dio_iomap_end()
7539 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); in btrfs_dio_iomap_end()
7545 length -= submitted; in btrfs_dio_iomap_end()
7550 unlock_extent(&BTRFS_I(inode)->io_tree, pos, in btrfs_dio_iomap_end()
7551 pos + length - 1); in btrfs_dio_iomap_end()
7552 ret = -ENOTBLK; in btrfs_dio_iomap_end()
7556 if (dio_data->reserve) in btrfs_dio_iomap_end()
7558 dio_data->data_reserved, pos, in btrfs_dio_iomap_end()
7559 dio_data->reserve, true); in btrfs_dio_iomap_end()
7560 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length); in btrfs_dio_iomap_end()
7561 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_end()
7565 * We're all done, we can re-set the current->journal_info now safely in btrfs_dio_iomap_end()
7568 if (dio_data->sync) { in btrfs_dio_iomap_end()
7569 ASSERT(current->journal_info == NULL); in btrfs_dio_iomap_end()
7570 current->journal_info = BTRFS_DIO_SYNC_STUB; in btrfs_dio_iomap_end()
7573 iomap->private = NULL; in btrfs_dio_iomap_end()
7581 * This implies a barrier so that stores to dio_bio->bi_status before in btrfs_dio_private_put()
7582 * this and loads of dio_bio->bi_status after this are fully ordered. in btrfs_dio_private_put()
7584 if (!refcount_dec_and_test(&dip->refs)) in btrfs_dio_private_put()
7587 if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { in btrfs_dio_private_put()
7588 __endio_write_update_ordered(BTRFS_I(dip->inode), in btrfs_dio_private_put()
7589 dip->logical_offset, in btrfs_dio_private_put()
7590 dip->bytes, in btrfs_dio_private_put()
7591 !dip->dio_bio->bi_status); in btrfs_dio_private_put()
7593 unlock_extent(&BTRFS_I(dip->inode)->io_tree, in btrfs_dio_private_put()
7594 dip->logical_offset, in btrfs_dio_private_put()
7595 dip->logical_offset + dip->bytes - 1); in btrfs_dio_private_put()
7598 bio_endio(dip->dio_bio); in btrfs_dio_private_put()
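The barrier comment above relies on the standard refcount "put" pattern: each completing bio may store to dio_bio->bi_status and then drop a reference, and only whoever drops the last reference goes on to read the status. A generic, illustrative version of that pattern (not btrfs code):

	#include <linux/printk.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct result {
		refcount_t refs;
		int status;	/* may be written by any holder before its put */
	};

	static void result_put(struct result *r)
	{
		/*
		 * refcount_dec_and_test() gives release ordering to stores the
		 * caller made before the put (e.g. to r->status) and acquire
		 * ordering on the final decrement, so the last holder's reads
		 * below see all of them.
		 */
		if (!refcount_dec_and_test(&r->refs))
			return;
		pr_info("final status: %d\n", r->status);
		kfree(r);
	}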
7606 struct btrfs_dio_private *dip = bio->bi_private; in submit_dio_repair_bio()
7607 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in submit_dio_repair_bio()
7616 refcount_inc(&dip->refs); in submit_dio_repair_bio()
7619 refcount_dec(&dip->refs); in submit_dio_repair_bio()
7627 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_check_read_dio_bio()
7628 const u32 sectorsize = fs_info->sectorsize; in btrfs_check_read_dio_bio()
7629 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; in btrfs_check_read_dio_bio()
7630 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_check_read_dio_bio()
7631 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_check_read_dio_bio()
7634 u64 start = io_bio->logical; in btrfs_check_read_dio_bio()
7638 __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) { in btrfs_check_read_dio_bio()
7657 &io_bio->bio, in btrfs_check_read_dio_bio()
7658 start - io_bio->logical, in btrfs_check_read_dio_bio()
7661 start + sectorsize - 1, in btrfs_check_read_dio_bio()
7662 io_bio->mirror_num, in btrfs_check_read_dio_bio()
7679 struct btrfs_fs_info *fs_info = inode->root->fs_info; in __endio_write_update_ordered()
7687 wq = fs_info->endio_freespace_worker; in __endio_write_update_ordered()
7689 wq = fs_info->endio_write_workers; in __endio_write_update_ordered()
7697 btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, in __endio_write_update_ordered()
7699 btrfs_queue_work(wq, &ordered->work); in __endio_write_update_ordered()
7712 ordered_bytes = offset + bytes - ordered_offset; in __endio_write_update_ordered()
7728 struct btrfs_dio_private *dip = bio->bi_private; in btrfs_end_dio_bio()
7729 blk_status_t err = bio->bi_status; in btrfs_end_dio_bio()
7732 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, in btrfs_end_dio_bio()
7734 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), in btrfs_end_dio_bio()
7735 bio->bi_opf, in btrfs_end_dio_bio()
7736 (unsigned long long)bio->bi_iter.bi_sector, in btrfs_end_dio_bio()
7737 bio->bi_iter.bi_size, err); in btrfs_end_dio_bio()
7740 err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio), in btrfs_end_dio_bio()
7745 dip->dio_bio->bi_status = err; in btrfs_end_dio_bio()
7754 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_dio_bio()
7755 struct btrfs_dio_private *dip = bio->bi_private; in btrfs_submit_dio_bio()
7761 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); in btrfs_submit_dio_bio()
7769 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) in btrfs_submit_dio_bio()
7788 csum_offset = file_offset - dip->logical_offset; in btrfs_submit_dio_bio()
7789 csum_offset >>= inode->i_sb->s_blocksize_bits; in btrfs_submit_dio_bio()
7790 csum_offset *= btrfs_super_csum_size(fs_info->super_copy); in btrfs_submit_dio_bio()
7791 btrfs_io_bio(bio)->csum = dip->csums + csum_offset; in btrfs_submit_dio_bio()
7808 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_create_dio_private()
7814 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_create_dio_private()
7815 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); in btrfs_create_dio_private()
7818 nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; in btrfs_create_dio_private()
7826 dip->inode = inode; in btrfs_create_dio_private()
7827 dip->logical_offset = file_offset; in btrfs_create_dio_private()
7828 dip->bytes = dio_bio->bi_iter.bi_size; in btrfs_create_dio_private()
7829 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; in btrfs_create_dio_private()
7830 dip->dio_bio = dio_bio; in btrfs_create_dio_private()
7831 refcount_set(&dip->refs, 1); in btrfs_create_dio_private()
7839 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_submit_direct()
7840 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_direct()
7853 struct btrfs_dio_data *dio_data = iomap->private; in btrfs_submit_direct()
7858 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, in btrfs_submit_direct()
7859 file_offset + dio_bio->bi_iter.bi_size - 1); in btrfs_submit_direct()
7861 dio_bio->bi_status = BLK_STS_RESOURCE; in btrfs_submit_direct()
7872 dip->csums); in btrfs_submit_direct()
7877 start_sector = dio_bio->bi_iter.bi_sector; in btrfs_submit_direct()
7878 submit_len = dio_bio->bi_iter.bi_size; in btrfs_submit_direct()
7897 bio->bi_private = dip; in btrfs_submit_direct()
7898 bio->bi_end_io = btrfs_end_dio_bio; in btrfs_submit_direct()
7899 btrfs_io_bio(bio)->logical = file_offset; in btrfs_submit_direct()
7902 submit_len -= clone_len; in btrfs_submit_direct()
7914 refcount_inc(&dip->refs); in btrfs_submit_direct()
7930 refcount_dec(&dip->refs); in btrfs_submit_direct()
7934 dio_data->submitted += clone_len; in btrfs_submit_direct()
7942 dip->dio_bio->bi_status = status; in btrfs_submit_direct()
7952 unsigned int blocksize_mask = fs_info->sectorsize - 1; in check_direct_IO()
7953 ssize_t retval = -EINVAL; in check_direct_IO()
7969 for (seg = 0; seg < iter->nr_segs; seg++) { in check_direct_IO()
7970 for (i = seg + 1; i < iter->nr_segs; i++) { in check_direct_IO()
7971 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) in check_direct_IO()
7988 if (current->journal_info == BTRFS_DIO_SYNC_STUB) { in btrfs_maybe_fsync_end_io()
7989 current->journal_info = NULL; in btrfs_maybe_fsync_end_io()
7997 iocb->ki_flags |= IOCB_DSYNC; in btrfs_maybe_fsync_end_io()
8020 struct file *file = iocb->ki_filp; in btrfs_direct_IO()
8021 struct inode *inode = file->f_mapping->host; in btrfs_direct_IO()
8022 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_direct_IO()
8024 loff_t offset = iocb->ki_pos; in btrfs_direct_IO()
8039 if (offset + count <= inode->i_size) { in btrfs_direct_IO()
8043 down_read(&BTRFS_I(inode)->dio_sem); in btrfs_direct_IO()
8050 if (current->journal_info) in btrfs_direct_IO()
8057 if (ret == -ENOTBLK) in btrfs_direct_IO()
8061 up_read(&BTRFS_I(inode)->dio_sem); in btrfs_direct_IO()
8084 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_readpage()
8086 u64 end = start + PAGE_SIZE - 1; in btrfs_readpage()
8101 struct inode *inode = page->mapping->host; in btrfs_writepage()
8104 if (current->flags & PF_MEMALLOC) { in btrfs_writepage()
8180 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_invalidatepage()
8181 struct extent_io_tree *tree = &inode->io_tree; in btrfs_invalidatepage()
8185 u64 page_end = page_start + PAGE_SIZE - 1; in btrfs_invalidatepage()
8188 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; in btrfs_invalidatepage()
8208 ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1); in btrfs_invalidatepage()
8211 ordered->file_offset + ordered->num_bytes - 1); in btrfs_invalidatepage()
8229 tree = &inode->ordered_tree; in btrfs_invalidatepage()
8231 spin_lock_irq(&tree->lock); in btrfs_invalidatepage()
8232 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); in btrfs_invalidatepage()
8233 new_len = start - ordered->file_offset; in btrfs_invalidatepage()
8234 if (new_len < ordered->truncated_len) in btrfs_invalidatepage()
8235 ordered->truncated_len = new_len; in btrfs_invalidatepage()
8236 spin_unlock_irq(&tree->lock); in btrfs_invalidatepage()
8240 end - start + 1, 1)) in btrfs_invalidatepage()
8299 struct page *page = vmf->page; in btrfs_page_mkwrite()
8300 struct inode *inode = file_inode(vmf->vma->vm_file); in btrfs_page_mkwrite()
8301 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_page_mkwrite()
8302 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_page_mkwrite()
8319 sb_start_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8321 page_end = page_start + PAGE_SIZE - 1; in btrfs_page_mkwrite()
8335 ret2 = file_update_time(vmf->vma->vm_file); in btrfs_page_mkwrite()
8350 if ((page->mapping != inode->i_mapping) || in btrfs_page_mkwrite()
8375 if (page->index == ((size - 1) >> PAGE_SHIFT)) { in btrfs_page_mkwrite()
8376 reserved_space = round_up(size - page_start, in btrfs_page_mkwrite()
8377 fs_info->sectorsize); in btrfs_page_mkwrite()
8379 end = page_start + reserved_space - 1; in btrfs_page_mkwrite()
8382 PAGE_SIZE - reserved_space, true); in btrfs_page_mkwrite()
8393 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, in btrfs_page_mkwrite()
8414 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); in btrfs_page_mkwrite()
8422 BTRFS_I(inode)->last_trans = fs_info->generation; in btrfs_page_mkwrite()
8423 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; in btrfs_page_mkwrite()
8424 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit; in btrfs_page_mkwrite()
8429 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8440 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8447 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_truncate()
8448 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_truncate()
8452 u64 mask = fs_info->sectorsize - 1; in btrfs_truncate()
8456 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), in btrfs_truncate()
8457 (u64)-1); in btrfs_truncate()
8485 * 1) rsv - for the truncate reservation, which we will steal from the in btrfs_truncate()
8487 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for in btrfs_truncate()
8492 return -ENOMEM; in btrfs_truncate()
8493 rsv->size = min_size; in btrfs_truncate()
8494 rsv->failfast = 1; in btrfs_truncate()
8507 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, in btrfs_truncate()
8518 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); in btrfs_truncate()
8519 trans->block_rsv = rsv; in btrfs_truncate()
8523 inode->i_size, in btrfs_truncate()
8525 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8526 if (ret != -ENOSPC && ret != -EAGAIN) in btrfs_truncate()
8543 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); in btrfs_truncate()
8544 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, in btrfs_truncate()
8547 trans->block_rsv = rsv; in btrfs_truncate()
8560 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0); in btrfs_truncate()
8574 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8608 inode->i_op = &btrfs_dir_inode_operations; in btrfs_create_subvol_root()
8609 inode->i_fop = &btrfs_dir_file_operations; in btrfs_create_subvol_root()
8617 btrfs_err(new_root->fs_info, in btrfs_create_subvol_root()
8619 new_root->root_key.objectid, err); in btrfs_create_subvol_root()
8637 ei->root = NULL; in btrfs_alloc_inode()
8638 ei->generation = 0; in btrfs_alloc_inode()
8639 ei->last_trans = 0; in btrfs_alloc_inode()
8640 ei->last_sub_trans = 0; in btrfs_alloc_inode()
8641 ei->logged_trans = 0; in btrfs_alloc_inode()
8642 ei->delalloc_bytes = 0; in btrfs_alloc_inode()
8643 ei->new_delalloc_bytes = 0; in btrfs_alloc_inode()
8644 ei->defrag_bytes = 0; in btrfs_alloc_inode()
8645 ei->disk_i_size = 0; in btrfs_alloc_inode()
8646 ei->flags = 0; in btrfs_alloc_inode()
8647 ei->csum_bytes = 0; in btrfs_alloc_inode()
8648 ei->index_cnt = (u64)-1; in btrfs_alloc_inode()
8649 ei->dir_index = 0; in btrfs_alloc_inode()
8650 ei->last_unlink_trans = 0; in btrfs_alloc_inode()
8651 ei->last_reflink_trans = 0; in btrfs_alloc_inode()
8652 ei->last_log_commit = 0; in btrfs_alloc_inode()
8654 spin_lock_init(&ei->lock); in btrfs_alloc_inode()
8655 ei->outstanding_extents = 0; in btrfs_alloc_inode()
8656 if (sb->s_magic != BTRFS_TEST_MAGIC) in btrfs_alloc_inode()
8657 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, in btrfs_alloc_inode()
8659 ei->runtime_flags = 0; in btrfs_alloc_inode()
8660 ei->prop_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8661 ei->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8663 ei->delayed_node = NULL; in btrfs_alloc_inode()
8665 ei->i_otime.tv_sec = 0; in btrfs_alloc_inode()
8666 ei->i_otime.tv_nsec = 0; in btrfs_alloc_inode()
8668 inode = &ei->vfs_inode; in btrfs_alloc_inode()
8669 extent_map_tree_init(&ei->extent_tree); in btrfs_alloc_inode()
8670 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode); in btrfs_alloc_inode()
8671 extent_io_tree_init(fs_info, &ei->io_failure_tree, in btrfs_alloc_inode()
8673 extent_io_tree_init(fs_info, &ei->file_extent_tree, in btrfs_alloc_inode()
8675 ei->io_tree.track_uptodate = true; in btrfs_alloc_inode()
8676 ei->io_failure_tree.track_uptodate = true; in btrfs_alloc_inode()
8677 atomic_set(&ei->sync_writers, 0); in btrfs_alloc_inode()
8678 mutex_init(&ei->log_mutex); in btrfs_alloc_inode()
8679 btrfs_ordered_inode_tree_init(&ei->ordered_tree); in btrfs_alloc_inode()
8680 INIT_LIST_HEAD(&ei->delalloc_inodes); in btrfs_alloc_inode()
8681 INIT_LIST_HEAD(&ei->delayed_iput); in btrfs_alloc_inode()
8682 RB_CLEAR_NODE(&ei->rb_node); in btrfs_alloc_inode()
8683 init_rwsem(&ei->dio_sem); in btrfs_alloc_inode()
8691 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); in btrfs_test_destroy_inode()
8705 struct btrfs_root *root = inode->root; in btrfs_destroy_inode()
8707 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); in btrfs_destroy_inode()
8708 WARN_ON(vfs_inode->i_data.nrpages); in btrfs_destroy_inode()
8709 WARN_ON(inode->block_rsv.reserved); in btrfs_destroy_inode()
8710 WARN_ON(inode->block_rsv.size); in btrfs_destroy_inode()
8711 WARN_ON(inode->outstanding_extents); in btrfs_destroy_inode()
8712 WARN_ON(inode->delalloc_bytes); in btrfs_destroy_inode()
8713 WARN_ON(inode->new_delalloc_bytes); in btrfs_destroy_inode()
8714 WARN_ON(inode->csum_bytes); in btrfs_destroy_inode()
8715 WARN_ON(inode->defrag_bytes); in btrfs_destroy_inode()
8726 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); in btrfs_destroy_inode()
8730 btrfs_err(root->fs_info, in btrfs_destroy_inode()
8732 ordered->file_offset, ordered->num_bytes); in btrfs_destroy_inode()
8740 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); in btrfs_destroy_inode()
8741 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); in btrfs_destroy_inode()
8742 btrfs_put_root(inode->root); in btrfs_destroy_inode()
8747 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_drop_inode()
8753 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_drop_inode()
8759 static void init_once(void *foo) in init_once() argument
8761 struct btrfs_inode *ei = (struct btrfs_inode *) foo; in init_once()
8763 inode_init_once(&ei->vfs_inode); in init_once()
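init_once() is a slab constructor: it runs once per object when the cache allocates backing pages, not on every allocation, which is why only one-time setup like inode_init_once() belongs in it. A sketch of how such a constructor is registered, roughly what btrfs_init_cachep() just below does (cache name and flags here are illustrative):

	struct kmem_cache *cachep;

	cachep = kmem_cache_create("btrfs_inode",
				   sizeof(struct btrfs_inode), 0,
				   SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
				   init_once);
	if (!cachep)
		return -ENOMEM;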
8816 return -ENOMEM; in btrfs_init_cachep()
8823 struct inode *inode = d_inode(path->dentry); in btrfs_getattr()
8824 u32 blocksize = inode->i_sb->s_blocksize; in btrfs_getattr()
8825 u32 bi_flags = BTRFS_I(inode)->flags; in btrfs_getattr()
8827 stat->result_mask |= STATX_BTIME; in btrfs_getattr()
8828 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; in btrfs_getattr()
8829 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; in btrfs_getattr()
8831 stat->attributes |= STATX_ATTR_APPEND; in btrfs_getattr()
8833 stat->attributes |= STATX_ATTR_COMPRESSED; in btrfs_getattr()
8835 stat->attributes |= STATX_ATTR_IMMUTABLE; in btrfs_getattr()
8837 stat->attributes |= STATX_ATTR_NODUMP; in btrfs_getattr()
8839 stat->attributes_mask |= (STATX_ATTR_APPEND | in btrfs_getattr()
8845 stat->dev = BTRFS_I(inode)->root->anon_dev; in btrfs_getattr()
8847 spin_lock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8848 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; in btrfs_getattr()
8849 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8850 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + in btrfs_getattr()
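
The block count reported by btrfs_getattr() combines what is already allocated on disk with delalloc bytes that have not been flushed yet, rounded up to the filesystem block size. A minimal userspace sketch of that arithmetic, assuming the sum is reported in the 512-byte units st_blocks uses (the helper name is illustrative, not the kernel code):

#include <stdio.h>
#include <stdint.h>

/* a must be a power of two */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Rough sketch: round the allocated byte count up to the fs block size,
 * add bytes still queued as delalloc, and express the result in the
 * 512-byte sectors stat(2) expects. Inputs are illustrative. */
static uint64_t stat_blocks(uint64_t disk_bytes, uint64_t delalloc_bytes,
			    uint32_t blocksize)
{
	return (ALIGN_UP(disk_bytes, blocksize) + delalloc_bytes) >> 9;
}

int main(void)
{
	/* 5000 bytes on a 4K-block fs plus 8K of pending delalloc -> 32 sectors */
	printf("%llu\n",
	       (unsigned long long)stat_blocks(5000, 8192, 4096));
	return 0;
}
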
8860 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename_exchange()
8862 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename_exchange()
8863 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename_exchange()
8864 struct inode *new_inode = new_dentry->d_inode; in btrfs_rename_exchange()
8865 struct inode *old_inode = old_dentry->d_inode; in btrfs_rename_exchange()
8878 return -EXDEV; in btrfs_rename_exchange()
8883 down_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8913 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8914 BTRFS_I(new_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8924 new_dentry->d_name.name, in btrfs_rename_exchange()
8925 new_dentry->d_name.len, in btrfs_rename_exchange()
8941 old_dentry->d_name.name, in btrfs_rename_exchange()
8942 old_dentry->d_name.len, in btrfs_rename_exchange()
8955 old_dir->i_ctime = old_dir->i_mtime = ctime; in btrfs_rename_exchange()
8956 new_dir->i_ctime = new_dir->i_mtime = ctime; in btrfs_rename_exchange()
8957 old_inode->i_ctime = ctime; in btrfs_rename_exchange()
8958 new_inode->i_ctime = ctime; in btrfs_rename_exchange()
8960 if (old_dentry->d_parent != new_dentry->d_parent) { in btrfs_rename_exchange()
8972 BTRFS_I(old_dentry->d_inode), in btrfs_rename_exchange()
8973 old_dentry->d_name.name, in btrfs_rename_exchange()
8974 old_dentry->d_name.len); in btrfs_rename_exchange()
8988 BTRFS_I(new_dentry->d_inode), in btrfs_rename_exchange()
8989 new_dentry->d_name.name, in btrfs_rename_exchange()
8990 new_dentry->d_name.len); in btrfs_rename_exchange()
9000 new_dentry->d_name.name, in btrfs_rename_exchange()
9001 new_dentry->d_name.len, 0, old_idx); in btrfs_rename_exchange()
9008 old_dentry->d_name.name, in btrfs_rename_exchange()
9009 old_dentry->d_name.len, 0, new_idx); in btrfs_rename_exchange()
9015 if (old_inode->i_nlink == 1) in btrfs_rename_exchange()
9016 BTRFS_I(old_inode)->dir_index = old_idx; in btrfs_rename_exchange()
9017 if (new_inode->i_nlink == 1) in btrfs_rename_exchange()
9018 BTRFS_I(new_inode)->dir_index = new_idx; in btrfs_rename_exchange()
9022 new_dentry->d_parent); in btrfs_rename_exchange()
9028 old_dentry->d_parent); in btrfs_rename_exchange()
9045 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || in btrfs_rename_exchange()
9046 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || in btrfs_rename_exchange()
9047 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || in btrfs_rename_exchange()
9049 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) in btrfs_rename_exchange()
9066 up_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
9086 dentry->d_name.name, in btrfs_whiteout_for_rename()
9087 dentry->d_name.len, in btrfs_whiteout_for_rename()
9098 inode->i_op = &btrfs_special_inode_operations; in btrfs_whiteout_for_rename()
9099 init_special_inode(inode, inode->i_mode, in btrfs_whiteout_for_rename()
9103 &dentry->d_name); in btrfs_whiteout_for_rename()
9126 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename()
9129 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename()
9130 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename()
9140 return -EPERM; in btrfs_rename()
9144 return -EXDEV; in btrfs_rename()
9148 return -ENOTEMPTY; in btrfs_rename()
9150 if (S_ISDIR(old_inode->i_mode) && new_inode && in btrfs_rename()
9151 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rename()
9152 return -ENOTEMPTY; in btrfs_rename()
9156 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, in btrfs_rename()
9157 new_dentry->d_name.name, in btrfs_rename()
9158 new_dentry->d_name.len); in btrfs_rename()
9161 if (ret == -EEXIST) { in btrfs_rename()
9168 /* maybe -EOVERFLOW */ in btrfs_rename()
9178 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) in btrfs_rename()
9179 filemap_flush(old_inode->i_mapping); in btrfs_rename()
9183 down_read(&fs_info->subvol_sem); in btrfs_rename()
9211 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename()
9219 new_dentry->d_name.name, in btrfs_rename()
9220 new_dentry->d_name.len, in btrfs_rename()
9230 old_dir->i_ctime = old_dir->i_mtime = in btrfs_rename()
9231 new_dir->i_ctime = new_dir->i_mtime = in btrfs_rename()
9232 old_inode->i_ctime = current_time(old_dir); in btrfs_rename()
9234 if (old_dentry->d_parent != new_dentry->d_parent) in btrfs_rename()
9243 old_dentry->d_name.name, in btrfs_rename()
9244 old_dentry->d_name.len); in btrfs_rename()
9255 new_inode->i_ctime = current_time(new_inode); in btrfs_rename()
9259 BUG_ON(new_inode->i_nlink == 0); in btrfs_rename()
9263 new_dentry->d_name.name, in btrfs_rename()
9264 new_dentry->d_name.len); in btrfs_rename()
9266 if (!ret && new_inode->i_nlink == 0) in btrfs_rename()
9276 new_dentry->d_name.name, in btrfs_rename()
9277 new_dentry->d_name.len, 0, index); in btrfs_rename()
9283 if (old_inode->i_nlink == 1) in btrfs_rename()
9284 BTRFS_I(old_inode)->dir_index = index; in btrfs_rename()
9288 new_dentry->d_parent); in btrfs_rename()
9315 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || in btrfs_rename()
9316 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || in btrfs_rename()
9317 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || in btrfs_rename()
9319 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) in btrfs_rename()
9329 up_read(&fs_info->subvol_sem); in btrfs_rename()
9339 return -EINVAL; in btrfs_rename2()
9362 inode = delalloc_work->inode; in btrfs_run_delalloc_work()
9363 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9365 &BTRFS_I(inode)->runtime_flags)) in btrfs_run_delalloc_work()
9366 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9369 complete(&delalloc_work->completion); in btrfs_run_delalloc_work()
9380 init_completion(&work->completion); in btrfs_alloc_delalloc_work()
9381 INIT_LIST_HEAD(&work->list); in btrfs_alloc_delalloc_work()
9382 work->inode = inode; in btrfs_alloc_delalloc_work()
9383 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); in btrfs_alloc_delalloc_work()
9404 mutex_lock(&root->delalloc_mutex); in start_delalloc_inodes()
9405 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9406 list_splice_init(&root->delalloc_inodes, &splice); in start_delalloc_inodes()
9411 list_move_tail(&binode->delalloc_inodes, in start_delalloc_inodes()
9412 &root->delalloc_inodes); in start_delalloc_inodes()
9413 inode = igrab(&binode->vfs_inode); in start_delalloc_inodes()
9415 cond_resched_lock(&root->delalloc_lock); in start_delalloc_inodes()
9418 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9422 &binode->runtime_flags); in start_delalloc_inodes()
9426 ret = -ENOMEM; in start_delalloc_inodes()
9429 list_add_tail(&work->list, &works); in start_delalloc_inodes()
9430 btrfs_queue_work(root->fs_info->flush_workers, in start_delalloc_inodes()
9431 &work->work); in start_delalloc_inodes()
9433 (*nr)--; in start_delalloc_inodes()
9438 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9440 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9444 list_del_init(&work->list); in start_delalloc_inodes()
9445 wait_for_completion(&work->completion); in start_delalloc_inodes()
9450 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9451 list_splice_tail(&splice, &root->delalloc_inodes); in start_delalloc_inodes()
9452 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9454 mutex_unlock(&root->delalloc_mutex); in start_delalloc_inodes()
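
start_delalloc_inodes() splices the root's delalloc list onto a private list under the spinlock, queues one flush work item per inode, then walks its own list waiting on every completion; on error the remaining inodes are spliced back. A hedged pthread sketch of that queue-then-drain pattern (illustrative names, not the kernel work/completion APIs):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace analogy: each "work" item carries its own completion, the
 * producer queues them all, then walks its private list waiting for
 * every one to finish. Names are illustrative. */
struct work {
	pthread_t thread;
	pthread_mutex_t lock;
	pthread_cond_t done_cond;
	int done;
	int inode_id;                   /* stand-in for the inode being flushed */
	struct work *next;
};

static void *flush_fn(void *arg)
{
	struct work *w = arg;

	printf("flushed inode %d\n", w->inode_id);   /* stand-in for filemap_flush() */

	pthread_mutex_lock(&w->lock);
	w->done = 1;                    /* complete(&work->completion) */
	pthread_cond_signal(&w->done_cond);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct work *head = NULL;

	for (int i = 0; i < 3; i++) {   /* queue one work item per inode */
		struct work *w = calloc(1, sizeof(*w));

		w->inode_id = i;
		pthread_mutex_init(&w->lock, NULL);
		pthread_cond_init(&w->done_cond, NULL);
		w->next = head;
		head = w;
		pthread_create(&w->thread, NULL, flush_fn, w);
	}

	while (head) {                  /* drain: wait_for_completion() on each */
		struct work *w = head;

		head = w->next;
		pthread_mutex_lock(&w->lock);
		while (!w->done)
			pthread_cond_wait(&w->done_cond, &w->lock);
		pthread_mutex_unlock(&w->lock);
		pthread_join(w->thread, NULL);
		pthread_cond_destroy(&w->done_cond);
		pthread_mutex_destroy(&w->lock);
		free(w);
	}
	return 0;
}
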
9460 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_start_delalloc_snapshot()
9463 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_start_delalloc_snapshot()
9464 return -EROFS; in btrfs_start_delalloc_snapshot()
9475 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_start_delalloc_roots()
9476 return -EROFS; in btrfs_start_delalloc_roots()
9480 mutex_lock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9481 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9482 list_splice_init(&fs_info->delalloc_roots, &splice); in btrfs_start_delalloc_roots()
9488 list_move_tail(&root->delalloc_root, in btrfs_start_delalloc_roots()
9489 &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9490 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9496 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9498 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9503 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9504 list_splice_tail(&splice, &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9505 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9507 mutex_unlock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9514 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_symlink()
9516 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_symlink()
9531 return -ENAMETOOLONG; in btrfs_symlink()
9548 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_symlink()
9549 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), in btrfs_symlink()
9563 inode->i_fop = &btrfs_file_operations; in btrfs_symlink()
9564 inode->i_op = &btrfs_file_inode_operations; in btrfs_symlink()
9565 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_symlink()
9567 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_symlink()
9573 err = -ENOMEM; in btrfs_symlink()
9586 leaf = path->nodes[0]; in btrfs_symlink()
9587 ei = btrfs_item_ptr(leaf, path->slots[0], in btrfs_symlink()
9589 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in btrfs_symlink()
9602 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_symlink()
9639 u64 start = ins->objectid; in insert_prealloc_file_extent()
9640 u64 len = ins->offset; in insert_prealloc_file_extent()
9677 return ERR_PTR(-ENOMEM); in insert_prealloc_file_extent()
9680 file_offset + len - 1, &extent_info, in insert_prealloc_file_extent()
9694 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in __btrfs_prealloc_file_range()
9695 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in __btrfs_prealloc_file_range()
9697 struct btrfs_root *root = BTRFS_I(inode)->root; in __btrfs_prealloc_file_range()
9703 u64 last_alloc = (u64)-1; in __btrfs_prealloc_file_range()
9706 u64 end = start + num_bytes - 1; in __btrfs_prealloc_file_range()
9727 * ->bytes_may_use to ->bytes_reserved. Any error that happens in __btrfs_prealloc_file_range()
9751 cur_offset + ins.offset - 1, 0); in __btrfs_prealloc_file_range()
9756 &BTRFS_I(inode)->runtime_flags); in __btrfs_prealloc_file_range()
9760 em->start = cur_offset; in __btrfs_prealloc_file_range()
9761 em->orig_start = cur_offset; in __btrfs_prealloc_file_range()
9762 em->len = ins.offset; in __btrfs_prealloc_file_range()
9763 em->block_start = ins.objectid; in __btrfs_prealloc_file_range()
9764 em->block_len = ins.offset; in __btrfs_prealloc_file_range()
9765 em->orig_block_len = ins.offset; in __btrfs_prealloc_file_range()
9766 em->ram_bytes = ins.offset; in __btrfs_prealloc_file_range()
9767 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in __btrfs_prealloc_file_range()
9768 em->generation = trans->transid; in __btrfs_prealloc_file_range()
9771 write_lock(&em_tree->lock); in __btrfs_prealloc_file_range()
9773 write_unlock(&em_tree->lock); in __btrfs_prealloc_file_range()
9774 if (ret != -EEXIST) in __btrfs_prealloc_file_range()
9777 cur_offset + ins.offset - 1, in __btrfs_prealloc_file_range()
9782 num_bytes -= ins.offset; in __btrfs_prealloc_file_range()
9787 inode->i_ctime = current_time(inode); in __btrfs_prealloc_file_range()
9788 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; in __btrfs_prealloc_file_range()
9790 (actual_len > inode->i_size) && in __btrfs_prealloc_file_range()
9791 (cur_offset > inode->i_size)) { in __btrfs_prealloc_file_range()
9816 end - clear_offset + 1); in __btrfs_prealloc_file_range()
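
__btrfs_prealloc_file_range() fills the range in pieces: each pass reserves an extent, records it (file extent item plus a PREALLOC-flagged extent map), advances cur_offset by whatever was granted, and remembers that size so later passes never request more than the last successful reservation. A hedged sketch of that shrinking-chunk loop with an illustrative allocator (not the kernel reservation code):

#include <stdio.h>
#include <stdint.h>

/* Illustrative allocator: pretend it can satisfy at most 48K per call. */
static uint64_t reserve_extent(uint64_t want)
{
	const uint64_t cap = 48 * 1024;

	return want < cap ? want : cap;
}

/* num_bytes shrinks by the size actually granted, and last_alloc caps
 * later requests so we stop asking for more than the allocator could
 * deliver last time. */
static void prealloc_range(uint64_t start, uint64_t num_bytes)
{
	uint64_t cur_offset = start;
	uint64_t last_alloc = UINT64_MAX;

	while (num_bytes > 0) {
		uint64_t want = num_bytes < last_alloc ? num_bytes : last_alloc;
		uint64_t got = reserve_extent(want);

		printf("extent: offset=%llu len=%llu\n",
		       (unsigned long long)cur_offset,
		       (unsigned long long)got);

		last_alloc = got;       /* never ask for more than this again */
		num_bytes -= got;
		cur_offset += got;
	}
}

int main(void)
{
	prealloc_range(0, 128 * 1024);  /* 128K handed out in 48K-sized pieces */
	return 0;
}
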
9845 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_permission()
9846 umode_t mode = inode->i_mode; in btrfs_permission()
9851 return -EROFS; in btrfs_permission()
9852 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) in btrfs_permission()
9853 return -EACCES; in btrfs_permission()
9860 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_tmpfile()
9862 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_tmpfile()
9887 inode->i_fop = &btrfs_file_operations; in btrfs_tmpfile()
9888 inode->i_op = &btrfs_file_inode_operations; in btrfs_tmpfile()
9890 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_tmpfile()
9908 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() in btrfs_tmpfile()
9924 struct inode *inode = tree->private_data; in btrfs_set_range_writeback()
9930 page = find_get_page(inode->i_mapping, index); in btrfs_set_range_writeback()
9947 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_add_swapfile_pin()
9954 return -ENOMEM; in btrfs_add_swapfile_pin()
9955 sp->ptr = ptr; in btrfs_add_swapfile_pin()
9956 sp->inode = inode; in btrfs_add_swapfile_pin()
9957 sp->is_block_group = is_block_group; in btrfs_add_swapfile_pin()
9959 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
9960 p = &fs_info->swapfile_pins.rb_node; in btrfs_add_swapfile_pin()
9964 if (sp->ptr < entry->ptr || in btrfs_add_swapfile_pin()
9965 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { in btrfs_add_swapfile_pin()
9966 p = &(*p)->rb_left; in btrfs_add_swapfile_pin()
9967 } else if (sp->ptr > entry->ptr || in btrfs_add_swapfile_pin()
9968 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { in btrfs_add_swapfile_pin()
9969 p = &(*p)->rb_right; in btrfs_add_swapfile_pin()
9971 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
9976 rb_link_node(&sp->node, parent, p); in btrfs_add_swapfile_pin()
9977 rb_insert_color(&sp->node, &fs_info->swapfile_pins); in btrfs_add_swapfile_pin()
9978 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
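
btrfs_add_swapfile_pin() keeps pins in an rb-tree ordered first by the pinned pointer and then by the owning inode, returning early when an identical (ptr, inode) pair is already present. The same two-field ordering, sketched with a plain unbalanced binary search tree instead of the kernel rb-tree API:

#include <stdio.h>
#include <stdlib.h>

/* Plain BST standing in for the rb-tree: ordered by (ptr, inode),
 * duplicates rejected. Illustrative only; the kernel version keeps the
 * tree balanced via rb_link_node()/rb_insert_color(). The raw pointer
 * comparison mirrors what the kernel code does. */
struct pin {
	const void *ptr;        /* block group or device being pinned */
	const void *inode;      /* swapfile inode doing the pinning */
	struct pin *left, *right;
};

static int pin_insert(struct pin **rootp, struct pin *sp)
{
	struct pin **p = rootp;

	while (*p) {
		struct pin *entry = *p;

		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode))
			p = &entry->left;
		else if (sp->ptr > entry->ptr ||
			 (sp->ptr == entry->ptr && sp->inode > entry->inode))
			p = &entry->right;
		else
			return 1;       /* (ptr, inode) already pinned */
	}
	*p = sp;
	return 0;
}

int main(void)
{
	static int bg1, bg2, ino;
	struct pin *root = NULL;
	struct pin a = { &bg1, &ino }, b = { &bg2, &ino }, dup = { &bg1, &ino };

	printf("%d %d %d\n", pin_insert(&root, &a),
	       pin_insert(&root, &b), pin_insert(&root, &dup));
	return 0;
}
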
9985 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_free_swapfile_pins()
9989 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
9990 node = rb_first(&fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
9994 if (sp->inode == inode) { in btrfs_free_swapfile_pins()
9995 rb_erase(&sp->node, &fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
9996 if (sp->is_block_group) in btrfs_free_swapfile_pins()
9997 btrfs_put_block_group(sp->ptr); in btrfs_free_swapfile_pins()
10002 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10022 first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10023 next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, in btrfs_add_swap_extent()
10028 nr_pages = next_ppage - first_ppage; in btrfs_add_swap_extent()
10031 if (bsi->start == 0) in btrfs_add_swap_extent()
10033 if (bsi->lowest_ppage > first_ppage_reported) in btrfs_add_swap_extent()
10034 bsi->lowest_ppage = first_ppage_reported; in btrfs_add_swap_extent()
10035 if (bsi->highest_ppage < (next_ppage - 1)) in btrfs_add_swap_extent()
10036 bsi->highest_ppage = next_ppage - 1; in btrfs_add_swap_extent()
10038 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); in btrfs_add_swap_extent()
10041 bsi->nr_extents += ret; in btrfs_add_swap_extent()
10042 bsi->nr_pages += nr_pages; in btrfs_add_swap_extent()
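
btrfs_add_swap_extent() only registers whole pages with the swap layer: the start of the block range is rounded up to a page boundary, the end is rounded down, and a range shorter than one full page contributes nothing. A small userspace sketch of that rounding, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

/* How many whole swap pages fit in [block_start, block_start + block_len)?
 * Partial pages at either edge are unusable for swap. */
static uint64_t whole_pages(uint64_t block_start, uint64_t block_len)
{
	uint64_t first_ppage = ALIGN_UP(block_start, PAGE_SIZE) >> PAGE_SHIFT;
	uint64_t next_ppage = ALIGN_DOWN(block_start + block_len, PAGE_SIZE)
			      >> PAGE_SHIFT;

	return next_ppage > first_ppage ? next_ppage - first_ppage : 0;
}

int main(void)
{
	/* extent starting 1K into a page, 12K long: only 2 full pages usable */
	printf("%llu\n", (unsigned long long)whole_pages(1024, 12288));
	return 0;
}
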
10051 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); in btrfs_swap_deactivate()
10058 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_swap_activate()
10059 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_swap_activate()
10064 .lowest_ppage = (sector_t)-1ULL, in btrfs_swap_activate()
10075 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_swap_activate()
10082 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { in btrfs_swap_activate()
10084 return -EINVAL; in btrfs_swap_activate()
10086 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { in btrfs_swap_activate()
10087 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10088 return -EINVAL; in btrfs_swap_activate()
10090 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { in btrfs_swap_activate()
10092 return -EINVAL; in btrfs_swap_activate()
10099 * fs_info->swapfile_pins prevents them from running while the swap in btrfs_swap_activate()
10107 return -EBUSY; in btrfs_swap_activate()
10115 atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles); in btrfs_swap_activate()
10117 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); in btrfs_swap_activate()
10119 lock_extent_bits(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10124 u64 len = isize - start; in btrfs_swap_activate()
10132 if (em->block_start == EXTENT_MAP_HOLE) { in btrfs_swap_activate()
10134 ret = -EINVAL; in btrfs_swap_activate()
10137 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_swap_activate()
10146 ret = -EINVAL; in btrfs_swap_activate()
10149 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_swap_activate()
10151 ret = -EINVAL; in btrfs_swap_activate()
10155 logical_block_start = em->block_start + (start - em->start); in btrfs_swap_activate()
10156 len = min(len, em->len - (start - em->start)); in btrfs_swap_activate()
10167 "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10168 ret = -EINVAL; in btrfs_swap_activate()
10178 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { in btrfs_swap_activate()
10181 ret = -EINVAL; in btrfs_swap_activate()
10186 device = em->map_lookup->stripes[0].dev; in btrfs_swap_activate()
10192 } else if (device != em->map_lookup->stripes[0].dev) { in btrfs_swap_activate()
10194 ret = -EINVAL; in btrfs_swap_activate()
10198 physical_block_start = (em->map_lookup->stripes[0].physical + in btrfs_swap_activate()
10199 (logical_block_start - em->start)); in btrfs_swap_activate()
10200 len = min(len, em->len - (logical_block_start - em->start)); in btrfs_swap_activate()
10208 ret = -EINVAL; in btrfs_swap_activate()
10245 unlock_extent_cached(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10256 sis->bdev = device->bdev; in btrfs_swap_activate()
10257 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; in btrfs_swap_activate()
10258 sis->max = bsi.nr_pages; in btrfs_swap_activate()
10259 sis->pages = bsi.nr_pages - 1; in btrfs_swap_activate()
10260 sis->highest_bit = bsi.nr_pages - 1; in btrfs_swap_activate()
10271 return -EOPNOTSUPP; in btrfs_swap_activate()
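
Taken together, btrfs_swap_activate() refuses files that cannot back swap safely: compressed or non-NOCOW files, files without nodatasum, ranges containing holes or inline extents, extents that would still need COW, RAID-profile chunks, and extents spread over more than one device; everything else is handed to the swap layer page by page. A hedged, flattened sketch of those per-extent checks using illustrative types (not the kernel extent map structures):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flattening of the per-extent checks: each descriptor says
 * how a file range is stored, and the validator mirrors the reasons the
 * kernel refuses to activate a btrfs swapfile. */
struct extent_desc {
	bool hole;              /* no allocated space backing the range */
	bool inline_data;       /* data stored inside the metadata item */
	bool compressed;
	bool shared_or_cow;     /* not NOCOW, or shared with a snapshot */
	int device_id;          /* which device backs this stripe */
};

static bool swapfile_ok(const struct extent_desc *ext, int n)
{
	int dev = -1;

	for (int i = 0; i < n; i++) {
		if (ext[i].hole || ext[i].inline_data || ext[i].compressed ||
		    ext[i].shared_or_cow)
			return false;           /* -EINVAL in the kernel */
		if (dev == -1)
			dev = ext[i].device_id;
		else if (dev != ext[i].device_id)
			return false;           /* must stay on one device */
	}
	return true;
}

int main(void)
{
	struct extent_desc good[] = { { .device_id = 0 }, { .device_id = 0 } };
	struct extent_desc bad[]  = { { .device_id = 0 }, { .device_id = 1 } };

	printf("%d %d\n", swapfile_ok(good, 2), swapfile_ok(bad, 2));
	return 0;
}
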