Lines Matching full:ordered

124  * look for the first ordered struct that has this offset, otherwise
165 * The ordered extent has reserved qgroup space, release now in alloc_ordered_extent()
229 "inconsistency in ordered tree at offset %llu", in insert_ordered_extent()
247 * Add an ordered extent to the per-inode tree.
260 * tree is given a single reference on the ordered extent that was inserted, and
263 * Return: the new ordered extent or error pointer.
279 * For PREALLOC, we do not use ordered extent members, but in btrfs_alloc_ordered_extent()
282 * So here we always pass 0 as offset for NOCOW/PREALLOC ordered extents, in btrfs_alloc_ordered_extent()
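To make the "new ordered extent or error pointer" contract above concrete, here is a minimal caller sketch. It is an illustration, not code from this file: assume it sits inside fs/btrfs with ordered-data.h in scope, and alloc_ordered_for_range() is a hypothetical wrapper hiding btrfs_alloc_ordered_extent()'s version-specific argument list.

        /* hypothetical wrapper hiding the version-specific argument list */
        struct btrfs_ordered_extent *alloc_ordered_for_range(struct btrfs_inode *inode,
                                                             u64 file_offset, u64 len);

        static int start_ordered_io(struct btrfs_inode *inode, u64 file_offset, u64 len)
        {
                struct btrfs_ordered_extent *ordered;

                ordered = alloc_ordered_for_range(inode, file_offset, len);
                if (IS_ERR(ordered))            /* failures are ERR_PTR(), never NULL */
                        return PTR_ERR(ordered);

                /* ... submit the bios covering [file_offset, file_offset + len) ... */

                /* drop the caller's reference; the tree keeps its own until completion */
                btrfs_put_ordered_extent(ordered);
                return 0;
        }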
307 * when an ordered extent is finished. If the list covers more than one
308 * ordered extent, it is split across the multiple extents.
320 void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered) in btrfs_mark_ordered_extent_error() argument
322 if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) in btrfs_mark_ordered_extent_error()
323 mapping_set_error(ordered->inode->vfs_inode.i_mapping, -EIO); in btrfs_mark_ordered_extent_error()
334 static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered, in can_finish_ordered_extent() argument
338 struct btrfs_inode *inode = ordered->inode; in can_finish_ordered_extent()
349 * Ordered flag indicates whether we still have in can_finish_ordered_extent()
350 * pending io unfinished for the ordered extent. in can_finish_ordered_extent()
360 if (WARN_ON_ONCE(len > ordered->bytes_left)) { in can_finish_ordered_extent()
362 "bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu… in can_finish_ordered_extent()
364 ordered->file_offset, ordered->num_bytes, in can_finish_ordered_extent()
365 len, ordered->bytes_left); in can_finish_ordered_extent()
366 ordered->bytes_left = 0; in can_finish_ordered_extent()
368 ordered->bytes_left -= len; in can_finish_ordered_extent()
372 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); in can_finish_ordered_extent()
374 if (ordered->bytes_left) in can_finish_ordered_extent()
378 * All the IO of the ordered extent is finished, we need to queue in can_finish_ordered_extent()
381 set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags); in can_finish_ordered_extent()
382 cond_wake_up(&ordered->wait); in can_finish_ordered_extent()
383 refcount_inc(&ordered->refs); in can_finish_ordered_extent()
384 trace_btrfs_ordered_extent_mark_finished(inode, ordered); in can_finish_ordered_extent()
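The accounting rule visible in the lines above reduces to a dec-and-test on bytes_left with a sticky error bit. A self-contained userspace model of just that rule, using toy types rather than the real kernel structures:

        #include <stdbool.h>
        #include <stdint.h>

        /* toy model of the fields the function above manipulates */
        struct toy_ordered {
                uint64_t bytes_left;    /* bytes of IO still outstanding */
                bool io_done;           /* models BTRFS_ORDERED_IO_DONE */
                bool ioerr;             /* models BTRFS_ORDERED_IOERR */
        };

        /* Returns true when this was the last pending chunk, mirroring the
         * dec-and-test logic above. */
        static bool toy_dec_and_test(struct toy_ordered *o, uint64_t len, bool uptodate)
        {
                if (len > o->bytes_left)        /* the kernel WARNs and clamps here */
                        len = o->bytes_left;
                o->bytes_left -= len;

                if (!uptodate)
                        o->ioerr = true;        /* error is sticky for the whole extent */

                if (o->bytes_left)
                        return false;           /* more IO still pending */

                o->io_done = true;              /* all IO finished, ready to complete */
                return true;
        }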
388 static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered) in btrfs_queue_ordered_fn() argument
390 struct btrfs_inode *inode = ordered->inode; in btrfs_queue_ordered_fn()
395 btrfs_init_work(&ordered->work, finish_ordered_fn, NULL); in btrfs_queue_ordered_fn()
396 btrfs_queue_work(wq, &ordered->work); in btrfs_queue_ordered_fn()
399 void btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered, in btrfs_finish_ordered_extent() argument
403 struct btrfs_inode *inode = ordered->inode; in btrfs_finish_ordered_extent()
410 ret = can_finish_ordered_extent(ordered, folio, file_offset, len, in btrfs_finish_ordered_extent()
419 * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we in btrfs_finish_ordered_extent()
430 * logging before ordered extent completion runs in the work queue. in btrfs_finish_ordered_extent()
433 * finds, so if by the time it collects extent maps the ordered extent in btrfs_finish_ordered_extent()
437 * wait for completion of ordered extents in order to reduce latency. in btrfs_finish_ordered_extent()
440 * ordered extents to complete before starting to log. in btrfs_finish_ordered_extent()
442 if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) in btrfs_finish_ordered_extent()
446 btrfs_queue_ordered_fn(ordered); in btrfs_finish_ordered_extent()
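An endio-side sketch of the call, assuming the argument order visible above (ordered, folio, file_offset, len, uptodate) and ordered-data.h in scope; the helper name is hypothetical. A write-completion path reports each finished sub-range once, and uptodate=false marks the whole ordered extent with BTRFS_ORDERED_IOERR (except for NOCOW extents, per the comment above).

        static void example_write_endio(struct btrfs_ordered_extent *ordered,
                                        struct folio *folio, u64 file_offset,
                                        u64 len, blk_status_t status)
        {
                btrfs_finish_ordered_extent(ordered, folio, file_offset, len,
                                            status == BLK_STS_OK /* uptodate */);
        }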
450 * Mark all ordered extents io inside the specified range finished.
454 * updated to indicate whether the pending ordered io is finished.
459 * This function is called from endio, thus the range must have ordered
482 /* No ordered extents at all */ in btrfs_mark_ordered_io_finished()
495 /* No more ordered extents, exit */ in btrfs_mark_ordered_io_finished()
501 /* Go to next ordered extent and continue */ in btrfs_mark_ordered_io_finished()
516 * Now we are definitely inside one ordered extent. in btrfs_mark_ordered_io_finished()
538 * Finish IO for one ordered extent across a given range. The range can only
539 * contain one ordered extent.
541 * @cached: The cached ordered extent. If not NULL, we can skip the tree
542 * search and use the ordered extent directly.
543 * Will also be used to store the finished ordered extent.
547 * Return true if the ordered extent is finished in the range, and update
551 * NOTE: The range can NOT cross multiple ordered extents.
552 * Thus the caller should ensure the range doesn't cross ordered extents.
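A caller sketch for the @cached contract described above, assuming the current signature bool btrfs_dec_test_ordered_pending(inode, &cached, file_offset, io_size) and ordered-data.h in scope. The cached pointer lets repeated calls over adjacent sub-ranges skip the tree search; whatever reference ends up stored in it must be dropped by the caller.

        static void finish_one_range(struct btrfs_inode *inode, u64 start, u64 len)
        {
                struct btrfs_ordered_extent *cached = NULL;

                if (btrfs_dec_test_ordered_pending(inode, &cached, start, len)) {
                        /* the whole ordered extent covering this range finished */
                }

                if (cached)
                        btrfs_put_ordered_extent(cached);
        }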
580 "bad ordered accounting left %llu size %llu", in btrfs_dec_test_ordered_pending()
605 * used to drop a reference on an ordered extent. This will free
632 * remove an ordered extent from the tree. No references are dropped
645 * If this is a free space inode the thread has not acquired the ordered in btrfs_remove_ordered_extent()
729 struct btrfs_ordered_extent *ordered; in btrfs_run_ordered_extent_work() local
731 ordered = container_of(work, struct btrfs_ordered_extent, flush_work); in btrfs_run_ordered_extent_work()
732 btrfs_start_ordered_extent(ordered); in btrfs_run_ordered_extent_work()
733 complete(&ordered->completion); in btrfs_run_ordered_extent_work()
737 * Wait for all the ordered extents in a root. Use @bg as range or do whole
747 struct btrfs_ordered_extent *ordered, *next; in btrfs_wait_ordered_extents() local
765 ordered = list_first_entry(&splice, struct btrfs_ordered_extent, in btrfs_wait_ordered_extents()
768 if (range_end <= ordered->disk_bytenr || in btrfs_wait_ordered_extents()
769 ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) { in btrfs_wait_ordered_extents()
770 list_move_tail(&ordered->root_extent_list, &skipped); in btrfs_wait_ordered_extents()
775 list_move_tail(&ordered->root_extent_list, in btrfs_wait_ordered_extents()
777 refcount_inc(&ordered->refs); in btrfs_wait_ordered_extents()
780 btrfs_init_work(&ordered->flush_work, in btrfs_wait_ordered_extents()
782 list_add_tail(&ordered->work_list, &works); in btrfs_wait_ordered_extents()
783 btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work); in btrfs_wait_ordered_extents()
795 list_for_each_entry_safe(ordered, next, &works, work_list) { in btrfs_wait_ordered_extents()
796 list_del_init(&ordered->work_list); in btrfs_wait_ordered_extents()
797 wait_for_completion(&ordered->completion); in btrfs_wait_ordered_extents()
798 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_extents()
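The queue-and-reap pattern above in miniature, written as if inside ordered-data.c (btrfs_run_ordered_extent_work() is static there); the skip list and counting logic of the real function are omitted. Each entry is pinned with a reference before being handed to a worker, then reaped with wait_for_completion() and unpinned.

        static void queue_and_wait_one(struct btrfs_fs_info *fs_info,
                                       struct btrfs_ordered_extent *ordered)
        {
                refcount_inc(&ordered->refs);   /* pin the extent for the worker */
                btrfs_init_work(&ordered->flush_work,
                                btrfs_run_ordered_extent_work, NULL);
                btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

                wait_for_completion(&ordered->completion); /* worker calls complete() */
                btrfs_put_ordered_extent(ordered);         /* drop our pin */
        }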
807 * Wait for @nr ordered extents that intersect the @bg, or the whole range of
843 * Start IO and wait for a given ordered extent to finish.
860 * If this is a free space inode do not take the ordered extents lockdep in btrfs_start_ordered_extent_nowriteback()
890 * Used to wait on ordered extents across a large range of bytes.
898 struct btrfs_ordered_extent *ordered; in btrfs_wait_ordered_range() local
917 * for any ordered extents that haven't completed yet. This is to make in btrfs_wait_ordered_range()
919 * before the ordered extents complete - to avoid failures (-EEXIST) in btrfs_wait_ordered_range()
920 * when adding the new ordered extents to the ordered tree. in btrfs_wait_ordered_range()
926 ordered = btrfs_lookup_first_ordered_extent(inode, end); in btrfs_wait_ordered_range()
927 if (!ordered) in btrfs_wait_ordered_range()
929 if (ordered->file_offset > orig_end) { in btrfs_wait_ordered_range()
930 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
933 if (ordered->file_offset + ordered->num_bytes <= start) { in btrfs_wait_ordered_range()
934 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
937 btrfs_start_ordered_extent(ordered); in btrfs_wait_ordered_range()
938 end = ordered->file_offset; in btrfs_wait_ordered_range()
940 * If the ordered extent had an error save the error but don't in btrfs_wait_ordered_range()
941 * exit without waiting first for all other ordered extents in in btrfs_wait_ordered_range()
944 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) in btrfs_wait_ordered_range()
946 btrfs_put_ordered_extent(ordered); in btrfs_wait_ordered_range()
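A typical caller sketch, assuming the int-returning, btrfs_inode-based form btrfs_wait_ordered_range(inode, start, len); older releases pass a plain struct inode. An fsync-style path waits for every ordered extent in the range and sees any error saved during the loop above.

        static int wait_range_for_sync(struct btrfs_inode *inode, u64 start, u64 len)
        {
                int ret = btrfs_wait_ordered_range(inode, start, len);

                /* ret is -EIO if any ordered extent in the range hit an IO error */
                return ret;
        }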
955 * find an ordered extent corresponding to file_offset. return NULL if
982 /* Since the DIO code tries to lock a wide area we need to look for any ordered
1023 * Adds all ordered extents to the given list. The list ends up sorted by the
1024 * file_offset of the ordered extents.
1035 struct btrfs_ordered_extent *ordered; in btrfs_get_ordered_extents_for_logging() local
1037 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); in btrfs_get_ordered_extents_for_logging()
1039 if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) in btrfs_get_ordered_extents_for_logging()
1042 ASSERT(list_empty(&ordered->log_list)); in btrfs_get_ordered_extents_for_logging()
1043 list_add_tail(&ordered->log_list, list); in btrfs_get_ordered_extents_for_logging()
1044 refcount_inc(&ordered->refs); in btrfs_get_ordered_extents_for_logging()
1045 trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered); in btrfs_get_ordered_extents_for_logging()
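A consumer sketch for the list built above (a hypothetical caller, assuming ordered-data.h in scope): every entry was added with an extra reference, so the consumer must unlink each one and drop that reference when done.

        static void drain_logging_list(struct list_head *list)
        {
                struct btrfs_ordered_extent *ordered, *tmp;

                list_for_each_entry_safe(ordered, tmp, list, log_list) {
                        /* ... log the extent here ... */
                        list_del_init(&ordered->log_list);
                        btrfs_put_ordered_extent(ordered);
                }
        }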
1074 * Lookup the first ordered extent that overlaps the range
1078 * that this one won't return any ordered extent that does not overlap the range.
1080 * ensures the first ordered extent gets returned.
1096 * And __tree_search() can't return the adjacent ordered extents in btrfs_lookup_first_ordered_range()
1108 * Direct hit, got an ordered extent that starts at in btrfs_lookup_first_ordered_range()
1138 /* No ordered extent in the range */ in btrfs_lookup_first_ordered_range()
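A lookup sketch, assuming the signature btrfs_lookup_first_ordered_range(inode, file_offset, len) and that a non-NULL result carries a reference the caller must drop; the wrapper name is hypothetical.

        static bool range_has_pending_ordered(struct btrfs_inode *inode,
                                              u64 file_offset, u64 len)
        {
                struct btrfs_ordered_extent *ordered;

                ordered = btrfs_lookup_first_ordered_range(inode, file_offset, len);
                if (!ordered)
                        return false;

                btrfs_put_ordered_extent(ordered);
                return true;
        }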
1151 * Lock the passed range and ensure all pending ordered extents in it are run
1154 * @inode: Inode whose ordered tree is to be searched
1168 struct btrfs_ordered_extent *ordered; in btrfs_lock_and_flush_ordered_range() local
1177 ordered = btrfs_lookup_ordered_range(inode, start, in btrfs_lock_and_flush_ordered_range()
1179 if (!ordered) { in btrfs_lock_and_flush_ordered_range()
1190 btrfs_start_ordered_extent(ordered); in btrfs_lock_and_flush_ordered_range()
1191 btrfs_put_ordered_extent(ordered); in btrfs_lock_and_flush_ordered_range()
1196 * Lock the passed range and ensure all pending ordered extents in it are run
1205 struct btrfs_ordered_extent *ordered; in btrfs_try_lock_ordered_range() local
1210 ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1); in btrfs_try_lock_ordered_range()
1211 if (!ordered) in btrfs_try_lock_ordered_range()
1214 btrfs_put_ordered_extent(ordered); in btrfs_try_lock_ordered_range()
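A usage sketch contrasting the two variants, assuming both take (inode, start, end, &cached_state): the try variant returns true only when it locked the range and found no pending ordered extents, so callers that cannot block fall back to the flushing variant. The function name here is hypothetical.

        static void lock_range_flushing_ordered(struct btrfs_inode *inode,
                                                u64 start, u64 end)
        {
                struct extent_state *cached = NULL;

                if (btrfs_try_lock_ordered_range(inode, start, end, &cached))
                        return;         /* locked, nothing pending */

                /* slow path: lock, then start and wait out pending ordered extents */
                btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
        }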
1220 /* Split out a new ordered extent for this first @len bytes of @ordered. */
1222 struct btrfs_ordered_extent *ordered, u64 len) in btrfs_split_ordered_extent() argument
1224 struct btrfs_inode *inode = ordered->inode; in btrfs_split_ordered_extent()
1227 u64 file_offset = ordered->file_offset; in btrfs_split_ordered_extent()
1228 u64 disk_bytenr = ordered->disk_bytenr; in btrfs_split_ordered_extent()
1229 unsigned long flags = ordered->flags; in btrfs_split_ordered_extent()
1235 trace_btrfs_ordered_extent_split(inode, ordered); in btrfs_split_ordered_extent()
1240 * The entire bio must be covered by the ordered extent, but we can't in btrfs_split_ordered_extent()
1243 if (WARN_ON_ONCE(len >= ordered->num_bytes)) in btrfs_split_ordered_extent()
1246 * If our ordered extent had an error there's no point in continuing. in btrfs_split_ordered_extent()
1249 * iterates over all existing ordered extents and sets the flag in btrfs_split_ordered_extent()
1257 /* We cannot split partially completed ordered extents. */ in btrfs_split_ordered_extent()
1258 if (ordered->bytes_left) { in btrfs_split_ordered_extent()
1260 if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes)) in btrfs_split_ordered_extent()
1263 /* We cannot split a compressed ordered extent. */ in btrfs_split_ordered_extent()
1264 if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes)) in btrfs_split_ordered_extent()
1268 len, 0, flags, ordered->compress_type); in btrfs_split_ordered_extent()
1278 * disk_num_bytes fields of the ordered extent below. And we disable in btrfs_split_ordered_extent()
1283 * btrfs_wait_ordered_extents() getting the trimmed ordered extent in btrfs_split_ordered_extent()
1284 * before we insert the new one, because even if it gets the ordered in btrfs_split_ordered_extent()
1286 * uses it or during its use, the ordered extent might have been in btrfs_split_ordered_extent()
1287 * trimmed in the meanwhile, and it missed the new ordered extent. in btrfs_split_ordered_extent()
1296 * We don't have overlapping ordered extents (that would imply double in btrfs_split_ordered_extent()
1298 * does not cross the ordered extent's num_bytes field, so there's in btrfs_split_ordered_extent()
1301 ordered->file_offset += len; in btrfs_split_ordered_extent()
1302 ordered->disk_bytenr += len; in btrfs_split_ordered_extent()
1303 ordered->num_bytes -= len; in btrfs_split_ordered_extent()
1304 ordered->disk_num_bytes -= len; in btrfs_split_ordered_extent()
1305 ordered->ram_bytes -= len; in btrfs_split_ordered_extent()
1307 if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) { in btrfs_split_ordered_extent()
1308 ASSERT(ordered->bytes_left == 0); in btrfs_split_ordered_extent()
1311 ordered->bytes_left -= len; in btrfs_split_ordered_extent()
1314 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) { in btrfs_split_ordered_extent()
1315 if (ordered->truncated_len > len) { in btrfs_split_ordered_extent()
1316 ordered->truncated_len -= len; in btrfs_split_ordered_extent()
1318 new->truncated_len = ordered->truncated_len; in btrfs_split_ordered_extent()
1319 ordered->truncated_len = 0; in btrfs_split_ordered_extent()
1323 list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) { in btrfs_split_ordered_extent()
1333 "inconsistency in ordered tree at offset %llu after split", in btrfs_split_ordered_extent()