1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
18 #include <linux/backing-dev.h>
25 * - test ext4_ext_search_left() and ext4_ext_search_right()
26 * - search for metadata in a few groups
29 * - normalization should take into account whether the file is still open
30 * - discard preallocations if no free space left (policy?)
31 * - don't normalize tails
32 * - quota
33 * - reservation for superuser
36 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
37 * - track min/max extents in each group for better group selection
38 * - mb_mark_used() may allocate chunk right after splitting buddy
39 * - tree of groups sorted by number of free blocks
40 * - error handling
51 * is larger. If the size is less than sbi->s_mb_stream_request we
61 * ext4_inode_info->i_prealloc_list, which contains list of prealloc
65 * pa_lstart -> the logical start block for this prealloc space
66 * pa_pstart -> the physical start block for this prealloc space
67 * pa_len -> length for this prealloc space (in clusters)
68 * pa_free -> free space available in this prealloc space (in clusters)
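/*
 * A minimal sketch of the prealloc-space fields listed above; the types are
 * illustrative assumptions, not the exact struct ext4_prealloc_space layout.
 */
struct pa_sketch {
	unsigned long long	pa_lstart;	/* logical start block */
	unsigned long long	pa_pstart;	/* physical start block */
	unsigned int		pa_len;		/* length, in clusters */
	unsigned int		pa_free;	/* unused clusters remaining */
};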
80 * have the group allocation flag set then we look at the locality group
85 * The reason for having a per cpu locality group is to reduce the contention
88 * The locality group prealloc space is used depending on whether we have
91 * If we can't allocate blocks via inode prealloc or locality group
123 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
124 * dependent on the cluster size; for non-bigalloc file systems, it is
127 * terms of number of blocks. If we have mounted the file system with -o
129 * smallest multiple of the stripe value (sbi->s_stripe) which is
135 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
137 * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
142 * number of buddy bitmap orders possible) number of lists. Group-infos are
145 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
147 * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
149 * This is an array of lists where in the i-th list there are groups with
151 * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
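/*
 * A runnable sketch of how the list index above can be derived from
 * bb_free / bb_fragments; the real mb_avg_fragment_size_order() further
 * below also clamps the result against MB_NUM_ORDERS(sb).
 */
#include <stdio.h>

static int fls_(unsigned int x)		/* fls(1) == 1, like the kernel's */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int bb_free = 1000, bb_fragments = 10;
	int order = fls_(bb_free / bb_fragments) - 2;	/* mirrors fls(len) - 2 */

	if (order < 0)
		order = 0;
	/* avg fragment of 100 clusters -> fls(100) - 2 == 5 -> list index 5 */
	printf("list index %d\n", order);
	return 0;
}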
193 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
196 * stripe size (sbi->s_stripe), we try to search for contiguous block in
198 * not, we search in the specific group using bitmap for best extents. The
211 * non-linear fashion. While that may not matter on non-rotational devices, for
226 * - on-disk bitmap
227 * - in-core buddy (actually includes buddy and bitmap)
228 * - preallocation descriptors (PAs)
231 * - inode
232 * assigned to a specific inode and can be used for this inode only.
233 * it describes part of inode's space preallocated to specific
240 * - locality group
241 * assigned to a specific locality group which does not translate to
247 * in-core buddy = on-disk bitmap + preallocation descriptors
250 * - allocated blocks (persistent)
251 * - preallocated blocks (non-persistent)
255 * literally -- time is discrete and delimited by locks.
258 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
261 * - init buddy: buddy = on-disk + PAs
262 * - new PA: buddy += N; PA = N
263 * - use inode PA: on-disk += N; PA -= N
264 * - discard inode PA buddy -= on-disk - PA; PA = 0
265 * - use locality group PA on-disk += N; PA -= N
266 * - discard locality group PA buddy -= PA; PA = 0
267 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
269 * bits from PA, only from on-disk bitmap
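/*
 * Restating the table above in one invariant: with "buddy", "disk" and "pa"
 * as counts of used clusters, every row preserves buddy == disk + pa:
 *
 *   init:            buddy = disk + pa
 *   new PA (N=8):    buddy += 8; pa += 8          invariant kept
 *   use 3 from PA:   disk  += 3; pa -= 3          invariant kept
 *   discard group PA: buddy -= pa; pa = 0         back to buddy == disk
 *
 * The inode-PA discard consults the on-disk bitmap instead, since (per the
 * note above) only the bitmap records which covered bits were actually used.
 */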
273 * killing performance on high-end SMP hardware. Let's try to relax it using
277 * nobody can re-allocate that block
278 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
280 * on-disk bitmap if buddy has same bit set and/or PA covers the corresponding
284 * - init buddy vs.
285 * - new PA
288 * - use inode PA
289 * we need to make sure that either on-disk bitmap or PA has uptodate data
290 * given (3) we care that PA-=N operation doesn't interfere with init
291 * - discard inode PA
293 * - use locality group PA
294 * again PA-=N must be serialized with init
295 * - discard locality group PA
297 * - new PA vs.
298 * - use inode PA
300 * - discard inode PA
302 * - use locality group PA
304 * - discard locality group PA
306 * - use inode PA
307 * - use inode PA
309 * - discard inode PA
311 * - use locality group PA
312 * nothing wrong here -- they're different PAs covering different blocks
313 * - discard locality group PA
317 * - PA is referenced, and while it is, no discard is possible
318 * - PA is referenced until its blocks are marked in the on-disk bitmap
319 * - PA changes only after on-disk bitmap
320 * - discard must not compete with init. either init is done before
322 * - buddy init as sum of on-disk bitmap and PAs is done atomically
332 * - allocation:
335 * mark bits in on-disk bitmap
338 * - use preallocation:
339 * find proper PA (per-inode or group)
341 * mark bits in on-disk bitmap
345 * - free:
347 * mark bits in on-disk bitmap
350 * - discard preallocations in group:
353 * load on-disk bitmap
355 * remove PA from object (inode or locality group)
356 * mark free blocks in-core
358 * - discard inode's preallocations:
365 * - bitlock on a group (group)
366 * - object (inode/locality) (object)
367 * - per-pa lock (pa)
368 * - cr_power2_aligned lists lock (cr_power2_aligned)
369 * - cr_goal_len_fast lists lock (cr_goal_len_fast)
372 * - new pa
376 * - find and use pa:
379 * - release consumed pa:
384 * - generate in-core bitmap:
388 * - discard all for given object (inode, locality group):
393 * - discard all for given group:
399 * - allocation path (ext4_mb_regular_allocator)
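/*
 * A sketch of how the "new pa" path above composes these locks; the
 * sequence is illustrative of the ordering rules, not a copy of the
 * kernel's exact code:
 *
 *	spin_lock(object_lock);			// inode / locality-group list
 *	// link the new pa into the object's prealloc list
 *	spin_unlock(object_lock);
 *	ext4_lock_group(sb, group);		// bitlock on the group
 *	// link the new pa into grp->bb_prealloc_list
 *	ext4_unlock_group(sb, group);
 */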
446 * sure that all the PAs on grp->bb_prealloc_list have been freed, or that it's empty.
508 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; in mb_find_next_zero_bit()
521 ret = ext4_find_next_bit(addr, tmpmax, start) - fix; in mb_find_next_bit()
531 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); in mb_find_buddy()
534 if (order > e4b->bd_blkbits + 1) { in mb_find_buddy()
541 *max = 1 << (e4b->bd_blkbits + 3); in mb_find_buddy()
542 return e4b->bd_bitmap; in mb_find_buddy()
545 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; in mb_find_buddy()
546 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; in mb_find_buddy()
556 struct super_block *sb = e4b->bd_sb; in mb_free_blocks_double()
558 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_free_blocks_double()
560 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); in mb_free_blocks_double()
562 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { in mb_free_blocks_double()
565 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); in mb_free_blocks_double()
567 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in mb_free_blocks_double()
569 ext4_grp_locked_error(sb, e4b->bd_group, in mb_free_blocks_double()
570 inode ? inode->i_ino : 0, in mb_free_blocks_double()
576 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); in mb_free_blocks_double()
584 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_mark_used_double()
586 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_mark_used_double()
588 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); in mb_mark_used_double()
589 mb_set_bit(first + i, e4b->bd_info->bb_bitmap); in mb_mark_used_double()
595 if (unlikely(e4b->bd_info->bb_bitmap == NULL)) in mb_cmp_bitmaps()
597 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { in mb_cmp_bitmaps()
600 b1 = (unsigned char *) e4b->bd_info->bb_bitmap; in mb_cmp_bitmaps()
602 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { in mb_cmp_bitmaps()
604 ext4_msg(e4b->bd_sb, KERN_ERR, in mb_cmp_bitmaps()
608 e4b->bd_group, i, i * 8, b1[i], b2[i]); in mb_cmp_bitmaps()
620 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); in mb_group_bb_bitmap_alloc()
621 if (!grp->bb_bitmap) in mb_group_bb_bitmap_alloc()
626 kfree(grp->bb_bitmap); in mb_group_bb_bitmap_alloc()
627 grp->bb_bitmap = NULL; in mb_group_bb_bitmap_alloc()
631 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); in mb_group_bb_bitmap_alloc()
637 kfree(grp->bb_bitmap); in mb_group_bb_bitmap_free()
683 struct super_block *sb = e4b->bd_sb; in __mb_check_buddy()
684 int order = e4b->bd_blkbits + 1; in __mb_check_buddy()
698 if (e4b->bd_info->bb_check_counter++ % 10) in __mb_check_buddy()
704 buddy2 = mb_find_buddy(e4b, order - 1, &max2); in __mb_check_buddy()
728 !mb_test_bit(k, e4b->bd_bitmap)); in __mb_check_buddy()
732 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); in __mb_check_buddy()
733 order--; in __mb_check_buddy()
736 fstart = -1; in __mb_check_buddy()
740 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); in __mb_check_buddy()
741 if (fstart == -1) { in __mb_check_buddy()
747 fstart = -1; in __mb_check_buddy()
749 for (j = 0; j < e4b->bd_blkbits + 1; j++) { in __mb_check_buddy()
756 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); in __mb_check_buddy()
757 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); in __mb_check_buddy()
759 grp = ext4_get_group_info(sb, e4b->bd_group); in __mb_check_buddy()
762 list_for_each(cur, &grp->bb_prealloc_list) { in __mb_check_buddy()
766 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); in __mb_check_buddy()
767 MB_CHECK_ASSERT(groupnr == e4b->bd_group); in __mb_check_buddy()
768 for (i = 0; i < pa->pa_len; i++) in __mb_check_buddy()
797 border = 2 << sb->s_blocksize_bits; in ext4_mb_mark_free_simple()
801 max = ffs(first | border) - 1; in ext4_mb_mark_free_simple()
804 min = fls(len) - 1; in ext4_mb_mark_free_simple()
811 grp->bb_counters[min]++; in ext4_mb_mark_free_simple()
814 buddy + sbi->s_mb_offsets[min]); in ext4_mb_mark_free_simple()
816 len -= chunk; in ext4_mb_mark_free_simple()
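/*
 * A runnable userspace sketch of the chunking loop above: a free run
 * [first, first+len) is split into aligned power-of-two chunks, bounded by
 * both the run's alignment (ffs) and its remaining length (fls).
 */
#include <stdio.h>

static int ffs_(unsigned int x) { return __builtin_ffs(x); }
static int fls_(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	unsigned int border = 1u << 15;		/* stand-in for 2 << s_blocksize_bits */
	unsigned int first = 6, len = 11;	/* free clusters [6, 17) */

	while (len > 0) {
		int max = ffs_(first | border) - 1;	/* alignment limit */
		int min = fls_(len) - 1;		/* length limit */
		unsigned int chunk = 1u << (max < min ? max : min);

		/* prints: 2 at 6, 8 at 8, 1 at 16 */
		printf("chunk of %u at %u\n", chunk, first);
		len -= chunk;
		first += chunk;
	}
	return 0;
}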
829 order = fls(len) - 2; in mb_avg_fragment_size_order()
833 order--; in mb_avg_fragment_size_order()
844 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) in mb_update_avg_fragment_size()
848 grp->bb_free / grp->bb_fragments); in mb_update_avg_fragment_size()
849 if (new_order == grp->bb_avg_fragment_size_order) in mb_update_avg_fragment_size()
852 if (grp->bb_avg_fragment_size_order != -1) { in mb_update_avg_fragment_size()
853 write_lock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
854 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
855 list_del(&grp->bb_avg_fragment_size_node); in mb_update_avg_fragment_size()
856 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
857 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
859 grp->bb_avg_fragment_size_order = new_order; in mb_update_avg_fragment_size()
860 write_lock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
861 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
862 list_add_tail(&grp->bb_avg_fragment_size_node, in mb_update_avg_fragment_size()
863 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
864 write_unlock(&sbi->s_mb_avg_fragment_size_locks[ in mb_update_avg_fragment_size()
865 grp->bb_avg_fragment_size_order]); in mb_update_avg_fragment_size()
875 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_p2_aligned()
879 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_choose_next_group_p2_aligned()
882 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) in ext4_mb_choose_next_group_p2_aligned()
883 atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); in ext4_mb_choose_next_group_p2_aligned()
885 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_p2_aligned()
886 if (list_empty(&sbi->s_mb_largest_free_orders[i])) in ext4_mb_choose_next_group_p2_aligned()
888 read_lock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
889 if (list_empty(&sbi->s_mb_largest_free_orders[i])) { in ext4_mb_choose_next_group_p2_aligned()
890 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
893 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], in ext4_mb_choose_next_group_p2_aligned()
895 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_p2_aligned()
896 atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]); in ext4_mb_choose_next_group_p2_aligned()
897 if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { in ext4_mb_choose_next_group_p2_aligned()
898 *group = iter->bb_group; in ext4_mb_choose_next_group_p2_aligned()
899 ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; in ext4_mb_choose_next_group_p2_aligned()
900 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
904 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); in ext4_mb_choose_next_group_p2_aligned()
917 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_good_group_avg_frag_lists()
918 struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order]; in ext4_mb_find_good_group_avg_frag_lists()
919 rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order]; in ext4_mb_find_good_group_avg_frag_lists()
921 enum criteria cr = ac->ac_criteria; in ext4_mb_find_good_group_avg_frag_lists()
931 if (sbi->s_mb_stats) in ext4_mb_find_good_group_avg_frag_lists()
932 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); in ext4_mb_find_good_group_avg_frag_lists()
933 if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { in ext4_mb_find_good_group_avg_frag_lists()
949 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_goal_fast()
953 if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { in ext4_mb_choose_next_group_goal_fast()
954 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_goal_fast()
955 atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); in ext4_mb_choose_next_group_goal_fast()
958 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_goal_fast()
959 i < MB_NUM_ORDERS(ac->ac_sb); i++) { in ext4_mb_choose_next_group_goal_fast()
962 *group = grp->bb_group; in ext4_mb_choose_next_group_goal_fast()
963 ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; in ext4_mb_choose_next_group_goal_fast()
972 * request len. However, allocation request for non-regular in ext4_mb_choose_next_group_goal_fast()
976 if (ac->ac_flags & EXT4_MB_HINT_DATA) in ext4_mb_choose_next_group_goal_fast()
994 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_choose_next_group_best_avail()
999 if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { in ext4_mb_choose_next_group_best_avail()
1000 if (sbi->s_mb_stats) in ext4_mb_choose_next_group_best_avail()
1001 atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); in ext4_mb_choose_next_group_best_avail()
1010 order = fls(ac->ac_g_ex.fe_len) - 1; in ext4_mb_choose_next_group_best_avail()
1011 min_order = order - sbi->s_mb_best_avail_max_trim_order; in ext4_mb_choose_next_group_best_avail()
1015 if (sbi->s_stripe > 0) { in ext4_mb_choose_next_group_best_avail()
1020 num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); in ext4_mb_choose_next_group_best_avail()
1026 min_order = fls(num_stripe_clusters) - 1; in ext4_mb_choose_next_group_best_avail()
1029 if (1 << min_order < ac->ac_o_ex.fe_len) in ext4_mb_choose_next_group_best_avail()
1030 min_order = fls(ac->ac_o_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1032 for (i = order; i >= min_order; i--) { in ext4_mb_choose_next_group_best_avail()
1039 ac->ac_g_ex.fe_len = 1 << i; in ext4_mb_choose_next_group_best_avail()
1047 ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, in ext4_mb_choose_next_group_best_avail()
1051 frag_order = mb_avg_fragment_size_order(ac->ac_sb, in ext4_mb_choose_next_group_best_avail()
1052 ac->ac_g_ex.fe_len); in ext4_mb_choose_next_group_best_avail()
1056 *group = grp->bb_group; in ext4_mb_choose_next_group_best_avail()
1057 ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; in ext4_mb_choose_next_group_best_avail()
1063 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_choose_next_group_best_avail()
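/*
 * A worked example of the trimming loop above, assuming no stripe and the
 * default s_mb_best_avail_max_trim_order == 3: for a normalized goal of
 * fe_len == 1000 clusters, order = fls(1000) - 1 = 9 and min_order = 6, so
 * the loop retries the goal as 512, 256, 128 and 64 clusters (each probing
 * the matching average-fragment-size list) before restoring
 * ac_orig_goal_len, provided the original request still fits in 1 << 6.
 */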
1069 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) in should_optimize_scan()
1071 if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) in should_optimize_scan()
1073 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) in should_optimize_scan()
1089 if (ac->ac_groups_linear_remaining) { in next_linear_group()
1090 ac->ac_groups_linear_remaining--; in next_linear_group()
1097 * Artificially restricted ngroups for non-extent in next_linear_group()
1119 *new_cr = ac->ac_criteria; in ext4_mb_choose_next_group()
1121 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) { in ext4_mb_choose_next_group()
1151 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) in mb_set_largest_free_order()
1152 if (grp->bb_counters[i] > 0) in mb_set_largest_free_order()
1156 i == grp->bb_largest_free_order) { in mb_set_largest_free_order()
1157 grp->bb_largest_free_order = i; in mb_set_largest_free_order()
1161 if (grp->bb_largest_free_order >= 0) { in mb_set_largest_free_order()
1162 write_lock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1163 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1164 list_del_init(&grp->bb_largest_free_order_node); in mb_set_largest_free_order()
1165 write_unlock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1166 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1168 grp->bb_largest_free_order = i; in mb_set_largest_free_order()
1169 if (grp->bb_largest_free_order >= 0 && grp->bb_free) { in mb_set_largest_free_order()
1170 write_lock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1171 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1172 list_add_tail(&grp->bb_largest_free_order_node, in mb_set_largest_free_order()
1173 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); in mb_set_largest_free_order()
1174 write_unlock(&sbi->s_mb_largest_free_orders_locks[ in mb_set_largest_free_order()
1175 grp->bb_largest_free_order]); in mb_set_largest_free_order()
1194 * of on-disk bitmap and preallocations */ in ext4_mb_generate_buddy()
1196 grp->bb_first_free = i; in ext4_mb_generate_buddy()
1201 len = i - first; in ext4_mb_generate_buddy()
1206 grp->bb_counters[0]++; in ext4_mb_generate_buddy()
1210 grp->bb_fragments = fragments; in ext4_mb_generate_buddy()
1212 if (free != grp->bb_free) { in ext4_mb_generate_buddy()
1216 free, grp->bb_free); in ext4_mb_generate_buddy()
1221 grp->bb_free = free; in ext4_mb_generate_buddy()
1228 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); in ext4_mb_generate_buddy()
1230 period = get_cycles() - period; in ext4_mb_generate_buddy()
1231 atomic_inc(&sbi->s_mb_buddies_generated); in ext4_mb_generate_buddy()
1232 atomic64_add(period, &sbi->s_mb_generation_time); in ext4_mb_generate_buddy()
1244 e4b->bd_info->bb_fragments = 0; in mb_regenerate_buddy()
1245 memset(e4b->bd_info->bb_counters, 0, in mb_regenerate_buddy()
1246 sizeof(*e4b->bd_info->bb_counters) * in mb_regenerate_buddy()
1247 (e4b->bd_sb->s_blocksize_bits + 2)); in mb_regenerate_buddy()
1249 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, in mb_regenerate_buddy()
1250 e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); in mb_regenerate_buddy()
1291 inode = page->mapping->host; in ext4_mb_init_cache()
1292 sb = inode->i_sb; in ext4_mb_init_cache()
1297 mb_debug(sb, "init page %lu\n", page->index); in ext4_mb_init_cache()
1308 return -ENOMEM; in ext4_mb_init_cache()
1312 first_group = page->index * blocks_per_page / 2; in ext4_mb_init_cache()
1352 first_block = page->index * blocks_per_page; in ext4_mb_init_cache()
1358 if (!bh[group - first_group]) in ext4_mb_init_cache()
1362 if (!buffer_verified(bh[group - first_group])) in ext4_mb_init_cache()
1374 bitmap = bh[group - first_group]->b_data; in ext4_mb_init_cache()
1382 err = -EFSCORRUPTED; in ext4_mb_init_cache()
1389 group, page->index, i * blocksize); in ext4_mb_init_cache()
1391 grinfo->bb_fragments = 0; in ext4_mb_init_cache()
1392 memset(grinfo->bb_counters, 0, in ext4_mb_init_cache()
1393 sizeof(*grinfo->bb_counters) * in ext4_mb_init_cache()
1408 group, page->index, i * blocksize); in ext4_mb_init_cache()
1415 /* mark all preallocated blks used in in-core bitmap */ in ext4_mb_init_cache()
1417 WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root)); in ext4_mb_init_cache()
1442 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1447 struct inode *inode = EXT4_SB(sb)->s_buddy_cache; in ext4_mb_get_buddy_page_lock()
1452 e4b->bd_buddy_page = NULL; in ext4_mb_get_buddy_page_lock()
1453 e4b->bd_bitmap_page = NULL; in ext4_mb_get_buddy_page_lock()
1455 blocks_per_page = PAGE_SIZE / sb->s_blocksize; in ext4_mb_get_buddy_page_lock()
1464 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_get_buddy_page_lock()
1466 return -ENOMEM; in ext4_mb_get_buddy_page_lock()
1467 BUG_ON(page->mapping != inode->i_mapping); in ext4_mb_get_buddy_page_lock()
1468 e4b->bd_bitmap_page = page; in ext4_mb_get_buddy_page_lock()
1469 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_get_buddy_page_lock()
1477 page = find_or_create_page(inode->i_mapping, block + 1, gfp); in ext4_mb_get_buddy_page_lock()
1479 return -ENOMEM; in ext4_mb_get_buddy_page_lock()
1480 BUG_ON(page->mapping != inode->i_mapping); in ext4_mb_get_buddy_page_lock()
1481 e4b->bd_buddy_page = page; in ext4_mb_get_buddy_page_lock()
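/*
 * Layout note: the buddy cache inode stores, per group, the block bitmap at
 * logical block group * 2 and the buddy at group * 2 + 1 (hence the
 * "block + 1" above). When blocks_per_page >= 2 both blocks share one page,
 * so bd_buddy_page stays NULL as the comment above describes; with block
 * size equal to page size they land on two consecutive pages, and both
 * must be locked.
 */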
1487 if (e4b->bd_bitmap_page) { in ext4_mb_put_buddy_page_lock()
1488 unlock_page(e4b->bd_bitmap_page); in ext4_mb_put_buddy_page_lock()
1489 put_page(e4b->bd_bitmap_page); in ext4_mb_put_buddy_page_lock()
1491 if (e4b->bd_buddy_page) { in ext4_mb_put_buddy_page_lock()
1492 unlock_page(e4b->bd_buddy_page); in ext4_mb_put_buddy_page_lock()
1493 put_page(e4b->bd_buddy_page); in ext4_mb_put_buddy_page_lock()
1515 return -EFSCORRUPTED; in ext4_mb_init_group()
1540 ret = -EIO; in ext4_mb_init_group()
1559 ret = -EIO; in ext4_mb_init_group()
1584 struct inode *inode = sbi->s_buddy_cache; in ext4_mb_load_buddy_gfp()
1589 blocks_per_page = PAGE_SIZE / sb->s_blocksize; in ext4_mb_load_buddy_gfp()
1592 return -EFSCORRUPTED; in ext4_mb_load_buddy_gfp()
1594 e4b->bd_blkbits = sb->s_blocksize_bits; in ext4_mb_load_buddy_gfp()
1595 e4b->bd_info = grp; in ext4_mb_load_buddy_gfp()
1596 e4b->bd_sb = sb; in ext4_mb_load_buddy_gfp()
1597 e4b->bd_group = group; in ext4_mb_load_buddy_gfp()
1598 e4b->bd_buddy_page = NULL; in ext4_mb_load_buddy_gfp()
1599 e4b->bd_bitmap_page = NULL; in ext4_mb_load_buddy_gfp()
1622 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); in ext4_mb_load_buddy_gfp()
1634 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_load_buddy_gfp()
1636 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, in ext4_mb_load_buddy_gfp()
1637 "ext4: bitmap's paging->mapping != inode->i_mapping\n")) { in ext4_mb_load_buddy_gfp()
1640 ret = -EINVAL; in ext4_mb_load_buddy_gfp()
1650 (poff * sb->s_blocksize)); in ext4_mb_load_buddy_gfp()
1656 ret = -ENOMEM; in ext4_mb_load_buddy_gfp()
1660 ret = -EIO; in ext4_mb_load_buddy_gfp()
1665 e4b->bd_bitmap_page = page; in ext4_mb_load_buddy_gfp()
1666 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_load_buddy_gfp()
1672 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); in ext4_mb_load_buddy_gfp()
1676 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_load_buddy_gfp()
1678 if (WARN_RATELIMIT(page->mapping != inode->i_mapping, in ext4_mb_load_buddy_gfp()
1679 "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) { in ext4_mb_load_buddy_gfp()
1682 ret = -EINVAL; in ext4_mb_load_buddy_gfp()
1686 ret = ext4_mb_init_cache(page, e4b->bd_bitmap, in ext4_mb_load_buddy_gfp()
1697 ret = -ENOMEM; in ext4_mb_load_buddy_gfp()
1701 ret = -EIO; in ext4_mb_load_buddy_gfp()
1706 e4b->bd_buddy_page = page; in ext4_mb_load_buddy_gfp()
1707 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); in ext4_mb_load_buddy_gfp()
1714 if (e4b->bd_bitmap_page) in ext4_mb_load_buddy_gfp()
1715 put_page(e4b->bd_bitmap_page); in ext4_mb_load_buddy_gfp()
1717 e4b->bd_buddy = NULL; in ext4_mb_load_buddy_gfp()
1718 e4b->bd_bitmap = NULL; in ext4_mb_load_buddy_gfp()
1730 if (e4b->bd_bitmap_page) in ext4_mb_unload_buddy()
1731 put_page(e4b->bd_bitmap_page); in ext4_mb_unload_buddy()
1732 if (e4b->bd_buddy_page) in ext4_mb_unload_buddy()
1733 put_page(e4b->bd_buddy_page); in ext4_mb_unload_buddy()
1742 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); in mb_find_order_for_block()
1743 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); in mb_find_order_for_block()
1745 while (order <= e4b->bd_blkbits + 1) { in mb_find_order_for_block()
1762 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_clear_bits()
1775 * will return the first zero bit found, if any; -1 otherwise
1780 int zero_bit = -1; in mb_test_and_clear_bits()
1784 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_test_and_clear_bits()
1787 if (*addr != (__u32)(-1) && zero_bit == -1) in mb_test_and_clear_bits()
1793 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) in mb_test_and_clear_bits()
1807 if ((cur & 31) == 0 && (len - cur) >= 32) { in mb_set_bits()
1823 (*bit) -= side; in mb_buddy_adjust_border()
1829 return -1; in mb_buddy_adjust_border()
1852 * --------------------------------- in mb_buddy_mark_free()
1854 * --------------------------------- in mb_buddy_mark_free()
1856 * --------------------------------- in mb_buddy_mark_free()
1872 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); in mb_buddy_mark_free()
1874 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); in mb_buddy_mark_free()
1881 mb_clear_bits(buddy, first, last - first + 1); in mb_buddy_mark_free()
1882 e4b->bd_info->bb_counters[order - 1] += last - first + 1; in mb_buddy_mark_free()
1897 int last = first + count - 1; in mb_free_blocks()
1898 struct super_block *sb = e4b->bd_sb; in mb_free_blocks()
1902 BUG_ON(last >= (sb->s_blocksize << 3)); in mb_free_blocks()
1903 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); in mb_free_blocks()
1905 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in mb_free_blocks()
1915 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); in mb_free_blocks()
1916 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); in mb_free_blocks()
1917 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) in mb_free_blocks()
1918 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); in mb_free_blocks()
1920 if (unlikely(block != -1)) { in mb_free_blocks()
1928 if (sbi->s_mount_state & EXT4_FC_REPLAY) { in mb_free_blocks()
1933 blocknr = ext4_group_first_block_no(sb, e4b->bd_group); in mb_free_blocks()
1935 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in mb_free_blocks()
1937 ext4_grp_locked_error(sb, e4b->bd_group, in mb_free_blocks()
1938 inode ? inode->i_ino : 0, blocknr, in mb_free_blocks()
1945 e4b->bd_info->bb_free += count; in mb_free_blocks()
1946 if (first < e4b->bd_info->bb_first_free) in mb_free_blocks()
1947 e4b->bd_info->bb_first_free = first; in mb_free_blocks()
1951 e4b->bd_info->bb_fragments--; in mb_free_blocks()
1953 e4b->bd_info->bb_fragments++; in mb_free_blocks()
1963 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; in mb_free_blocks()
1966 last -= !right_is_free; in mb_free_blocks()
1967 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1; in mb_free_blocks()
1973 mb_set_largest_free_order(sb, e4b->bd_info); in mb_free_blocks()
1974 mb_update_avg_fragment_size(sb, e4b->bd_info); in mb_free_blocks()
1985 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_find_extent()
1992 ex->fe_len = 0; in mb_find_extent()
1993 ex->fe_start = 0; in mb_find_extent()
1994 ex->fe_group = 0; in mb_find_extent()
2001 ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); in mb_find_extent()
2002 ex->fe_start = block; in mb_find_extent()
2003 ex->fe_group = e4b->bd_group; in mb_find_extent()
2007 while (needed > ex->fe_len && in mb_find_extent()
2014 if (mb_test_bit(next, e4b->bd_bitmap)) in mb_find_extent()
2020 ex->fe_len += 1 << order; in mb_find_extent()
2023 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { in mb_find_extent()
2026 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, in mb_find_extent()
2029 block, order, needed, ex->fe_group, ex->fe_start, in mb_find_extent()
2030 ex->fe_len, ex->fe_logical); in mb_find_extent()
2031 ex->fe_len = 0; in mb_find_extent()
2032 ex->fe_start = 0; in mb_find_extent()
2033 ex->fe_group = 0; in mb_find_extent()
2035 return ex->fe_len; in mb_find_extent()
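/*
 * A worked example of the fe_len formula above: if block 37 is found in a
 * free buddy chunk of order 3 (8 clusters covering 32..39), then
 * fe_len = (1 << 3) - (37 & 7) = 8 - 5 = 3, i.e. clusters 37..39, from the
 * requested block to the end of that chunk; the while loop then keeps
 * appending neighbouring free chunks until "needed" is satisfied.
 */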
2044 int start = ex->fe_start; in mb_mark_used()
2045 int len = ex->fe_len; in mb_mark_used()
2051 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); in mb_mark_used()
2052 BUG_ON(e4b->bd_group != ex->fe_group); in mb_mark_used()
2053 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); in mb_mark_used()
2058 e4b->bd_info->bb_free -= len; in mb_mark_used()
2059 if (e4b->bd_info->bb_first_free == start) in mb_mark_used()
2060 e4b->bd_info->bb_first_free += len; in mb_mark_used()
2064 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); in mb_mark_used()
2065 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) in mb_mark_used()
2066 max = !mb_test_bit(start + len, e4b->bd_bitmap); in mb_mark_used()
2068 e4b->bd_info->bb_fragments++; in mb_mark_used()
2070 e4b->bd_info->bb_fragments--; in mb_mark_used()
2086 e4b->bd_info->bb_counters[ord]--; in mb_mark_used()
2088 len -= mlen; in mb_mark_used()
2101 e4b->bd_info->bb_counters[ord]--; in mb_mark_used()
2103 ord--; in mb_mark_used()
2108 e4b->bd_info->bb_counters[ord]++; in mb_mark_used()
2109 e4b->bd_info->bb_counters[ord]++; in mb_mark_used()
2112 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); in mb_mark_used()
2114 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); in mb_mark_used()
2115 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); in mb_mark_used()
2127 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_use_best_found()
2130 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); in ext4_mb_use_best_found()
2131 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_use_best_found()
2133 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); in ext4_mb_use_best_found()
2134 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; in ext4_mb_use_best_found()
2135 ret = mb_mark_used(e4b, &ac->ac_b_ex); in ext4_mb_use_best_found()
2139 ac->ac_f_ex = ac->ac_b_ex; in ext4_mb_use_best_found()
2141 ac->ac_status = AC_STATUS_FOUND; in ext4_mb_use_best_found()
2142 ac->ac_tail = ret & 0xffff; in ext4_mb_use_best_found()
2143 ac->ac_buddy = ret >> 16; in ext4_mb_use_best_found()
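/*
 * The two stores above unpack mb_mark_used()'s return value, which (if
 * memory of that function serves) packs "len | (ord << 16)" at the first
 * buddy split: the low 16 bits are the tail length remaining at that
 * split, the high 16 bits the order of the chunk it was cut from.
 */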
2152 ac->ac_bitmap_page = e4b->bd_bitmap_page; in ext4_mb_use_best_found()
2153 get_page(ac->ac_bitmap_page); in ext4_mb_use_best_found()
2154 ac->ac_buddy_page = e4b->bd_buddy_page; in ext4_mb_use_best_found()
2155 get_page(ac->ac_buddy_page); in ext4_mb_use_best_found()
2157 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_use_best_found()
2158 spin_lock(&sbi->s_md_lock); in ext4_mb_use_best_found()
2159 sbi->s_mb_last_group = ac->ac_f_ex.fe_group; in ext4_mb_use_best_found()
2160 sbi->s_mb_last_start = ac->ac_f_ex.fe_start; in ext4_mb_use_best_found()
2161 spin_unlock(&sbi->s_md_lock); in ext4_mb_use_best_found()
2168 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) in ext4_mb_use_best_found()
2177 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_check_limits()
2178 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_check_limits()
2179 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_check_limits()
2181 if (ac->ac_status == AC_STATUS_FOUND) in ext4_mb_check_limits()
2186 if (ac->ac_found > sbi->s_mb_max_to_scan && in ext4_mb_check_limits()
2187 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_check_limits()
2188 ac->ac_status = AC_STATUS_BREAK; in ext4_mb_check_limits()
2195 if (bex->fe_len < gex->fe_len) in ext4_mb_check_limits()
2198 if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) in ext4_mb_check_limits()
2216 * up to a max of sbi->s_mb_max_to_scan times (default 200). After
2220 * up to a max of sbi->s_mb_min_to_scan times (default 10) before
2230 struct ext4_free_extent *bex = &ac->ac_b_ex; in ext4_mb_measure_extent()
2231 struct ext4_free_extent *gex = &ac->ac_g_ex; in ext4_mb_measure_extent()
2233 BUG_ON(ex->fe_len <= 0); in ext4_mb_measure_extent()
2234 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2235 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); in ext4_mb_measure_extent()
2236 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); in ext4_mb_measure_extent()
2238 ac->ac_found++; in ext4_mb_measure_extent()
2239 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_measure_extent()
2242 * The special case - take what you catch first in ext4_mb_measure_extent()
2244 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_measure_extent()
2253 if (ex->fe_len == gex->fe_len) { in ext4_mb_measure_extent()
2262 if (bex->fe_len == 0) { in ext4_mb_measure_extent()
2270 if (bex->fe_len < gex->fe_len) { in ext4_mb_measure_extent()
2273 if (ex->fe_len > bex->fe_len) in ext4_mb_measure_extent()
2275 } else if (ex->fe_len > gex->fe_len) { in ext4_mb_measure_extent()
2279 if (ex->fe_len < bex->fe_len) in ext4_mb_measure_extent()
2290 struct ext4_free_extent ex = ac->ac_b_ex; in ext4_mb_try_best_found()
2296 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_try_best_found()
2300 ext4_lock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2301 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in ext4_mb_try_best_found()
2307 ac->ac_b_ex = ex; in ext4_mb_try_best_found()
2312 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_try_best_found()
2320 ext4_group_t group = ac->ac_g_ex.fe_group; in ext4_mb_find_by_goal()
2323 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); in ext4_mb_find_by_goal()
2324 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_find_by_goal()
2328 return -EFSCORRUPTED; in ext4_mb_find_by_goal()
2329 if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) in ext4_mb_find_by_goal()
2331 if (grp->bb_free == 0) in ext4_mb_find_by_goal()
2334 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); in ext4_mb_find_by_goal()
2338 ext4_lock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2339 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) in ext4_mb_find_by_goal()
2342 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, in ext4_mb_find_by_goal()
2343 ac->ac_g_ex.fe_len, &ex); in ext4_mb_find_by_goal()
2346 if (max >= ac->ac_g_ex.fe_len && in ext4_mb_find_by_goal()
2347 ac->ac_g_ex.fe_len == EXT4_B2C(sbi, sbi->s_stripe)) { in ext4_mb_find_by_goal()
2350 start = ext4_grp_offs_to_block(ac->ac_sb, &ex); in ext4_mb_find_by_goal()
2351 /* use do_div to get remainder (would be 64-bit modulo) */ in ext4_mb_find_by_goal()
2352 if (do_div(start, sbi->s_stripe) == 0) { in ext4_mb_find_by_goal()
2353 ac->ac_found++; in ext4_mb_find_by_goal()
2354 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2357 } else if (max >= ac->ac_g_ex.fe_len) { in ext4_mb_find_by_goal()
2359 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2360 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2361 ac->ac_found++; in ext4_mb_find_by_goal()
2362 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2364 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { in ext4_mb_find_by_goal()
2368 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); in ext4_mb_find_by_goal()
2369 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); in ext4_mb_find_by_goal()
2370 ac->ac_found++; in ext4_mb_find_by_goal()
2371 ac->ac_b_ex = ex; in ext4_mb_find_by_goal()
2375 ext4_unlock_group(ac->ac_sb, group); in ext4_mb_find_by_goal()
2389 struct super_block *sb = ac->ac_sb; in ext4_mb_simple_scan_group()
2390 struct ext4_group_info *grp = e4b->bd_info; in ext4_mb_simple_scan_group()
2396 BUG_ON(ac->ac_2order <= 0); in ext4_mb_simple_scan_group()
2397 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { in ext4_mb_simple_scan_group()
2398 if (grp->bb_counters[i] == 0) in ext4_mb_simple_scan_group()
2408 ext4_mark_group_bitmap_corrupted(ac->ac_sb, in ext4_mb_simple_scan_group()
2409 e4b->bd_group, in ext4_mb_simple_scan_group()
2411 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, in ext4_mb_simple_scan_group()
2413 grp->bb_counters[i], i); in ext4_mb_simple_scan_group()
2416 ac->ac_found++; in ext4_mb_simple_scan_group()
2417 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_simple_scan_group()
2419 ac->ac_b_ex.fe_len = 1 << i; in ext4_mb_simple_scan_group()
2420 ac->ac_b_ex.fe_start = k << i; in ext4_mb_simple_scan_group()
2421 ac->ac_b_ex.fe_group = e4b->bd_group; in ext4_mb_simple_scan_group()
2425 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); in ext4_mb_simple_scan_group()
2427 if (EXT4_SB(sb)->s_mb_stats) in ext4_mb_simple_scan_group()
2428 atomic_inc(&EXT4_SB(sb)->s_bal_2orders); in ext4_mb_simple_scan_group()
2443 struct super_block *sb = ac->ac_sb; in ext4_mb_complex_scan_group()
2444 void *bitmap = e4b->bd_bitmap; in ext4_mb_complex_scan_group()
2449 free = e4b->bd_info->bb_free; in ext4_mb_complex_scan_group()
2453 i = e4b->bd_info->bb_first_free; in ext4_mb_complex_scan_group()
2455 while (free && ac->ac_status == AC_STATUS_CONTINUE) { in ext4_mb_complex_scan_group()
2464 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in ext4_mb_complex_scan_group()
2466 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, in ext4_mb_complex_scan_group()
2473 if (!ext4_mb_cr_expensive(ac->ac_criteria)) { in ext4_mb_complex_scan_group()
2482 freelen = j - i; in ext4_mb_complex_scan_group()
2484 if (freelen < ac->ac_g_ex.fe_len) { in ext4_mb_complex_scan_group()
2486 free -= freelen; in ext4_mb_complex_scan_group()
2491 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); in ext4_mb_complex_scan_group()
2495 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, in ext4_mb_complex_scan_group()
2497 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, in ext4_mb_complex_scan_group()
2512 free -= ex.fe_len; in ext4_mb_complex_scan_group()
2520 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2526 struct super_block *sb = ac->ac_sb; in ext4_mb_scan_aligned()
2528 void *bitmap = e4b->bd_bitmap; in ext4_mb_scan_aligned()
2535 BUG_ON(sbi->s_stripe == 0); in ext4_mb_scan_aligned()
2537 /* find first stripe-aligned block in group */ in ext4_mb_scan_aligned()
2538 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); in ext4_mb_scan_aligned()
2540 a = first_group_block + sbi->s_stripe - 1; in ext4_mb_scan_aligned()
2541 do_div(a, sbi->s_stripe); in ext4_mb_scan_aligned()
2542 i = (a * sbi->s_stripe) - first_group_block; in ext4_mb_scan_aligned()
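/*
 * A worked example of the rounding above: with first_group_block = 32778
 * and sbi->s_stripe = 16, a = (32778 + 15) / 16 = 2049, so
 * i = 2049 * 16 - 32778 = 6; the scan starts at group-relative offset 6,
 * the first block in the group that sits on a stripe boundary.
 */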
2544 stripe = EXT4_B2C(sbi, sbi->s_stripe); in ext4_mb_scan_aligned()
2550 ac->ac_found++; in ext4_mb_scan_aligned()
2551 ac->ac_cX_found[ac->ac_criteria]++; in ext4_mb_scan_aligned()
2553 ac->ac_b_ex = ex; in ext4_mb_scan_aligned()
2571 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); in ext4_mb_good_group()
2572 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group()
2579 free = grp->bb_free; in ext4_mb_good_group()
2583 fragments = grp->bb_fragments; in ext4_mb_good_group()
2589 BUG_ON(ac->ac_2order == 0); in ext4_mb_good_group()
2592 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && in ext4_mb_good_group()
2597 if (free < ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2600 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) in ext4_mb_good_group()
2603 if (grp->bb_largest_free_order < ac->ac_2order) in ext4_mb_good_group()
2609 if ((free / fragments) >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2613 if (free >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
2639 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); in ext4_mb_good_group_nolock()
2640 struct super_block *sb = ac->ac_sb; in ext4_mb_good_group_nolock()
2642 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; in ext4_mb_good_group_nolock()
2647 return -EFSCORRUPTED; in ext4_mb_good_group_nolock()
2648 if (sbi->s_mb_stats) in ext4_mb_good_group_nolock()
2649 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); in ext4_mb_good_group_nolock()
2654 free = grp->bb_free; in ext4_mb_good_group_nolock()
2662 if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) in ext4_mb_good_group_nolock()
2687 (!sbi->s_log_groups_per_flex || in ext4_mb_good_group_nolock()
2688 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && in ext4_mb_good_group_nolock()
2690 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) in ext4_mb_good_group_nolock()
2722 while (nr-- > 0) { in ext4_mb_prefetch()
2769 while (nr-- > 0) { in ext4_mb_prefetch_fini()
2772 group--; in ext4_mb_prefetch_fini()
2796 sb = ac->ac_sb; in ext4_mb_regular_allocator()
2799 /* non-extent files are limited to low blocks/groups */ in ext4_mb_regular_allocator()
2800 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) in ext4_mb_regular_allocator()
2801 ngroups = sbi->s_blockfile_groups; in ext4_mb_regular_allocator()
2803 BUG_ON(ac->ac_status == AC_STATUS_FOUND); in ext4_mb_regular_allocator()
2807 if (err || ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2810 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) in ext4_mb_regular_allocator()
2814 * ac->ac_2order is set only if the fe_len is a power of 2 in ext4_mb_regular_allocator()
2815 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED in ext4_mb_regular_allocator()
2818 i = fls(ac->ac_g_ex.fe_len); in ext4_mb_regular_allocator()
2819 ac->ac_2order = 0; in ext4_mb_regular_allocator()
2824 * We also support searching for power-of-two requests only for in ext4_mb_regular_allocator()
2827 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { in ext4_mb_regular_allocator()
2828 if (is_power_of_2(ac->ac_g_ex.fe_len)) in ext4_mb_regular_allocator()
2829 ac->ac_2order = array_index_nospec(i - 1, in ext4_mb_regular_allocator()
2834 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { in ext4_mb_regular_allocator()
2836 spin_lock(&sbi->s_md_lock); in ext4_mb_regular_allocator()
2837 ac->ac_g_ex.fe_group = sbi->s_mb_last_group; in ext4_mb_regular_allocator()
2838 ac->ac_g_ex.fe_start = sbi->s_mb_last_start; in ext4_mb_regular_allocator()
2839 spin_unlock(&sbi->s_md_lock); in ext4_mb_regular_allocator()
2843 * Let's just scan groups to find more or less suitable blocks. We in ext4_mb_regular_allocator()
2847 if (ac->ac_2order) in ext4_mb_regular_allocator()
2850 for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { in ext4_mb_regular_allocator()
2851 ac->ac_criteria = cr; in ext4_mb_regular_allocator()
2856 group = ac->ac_g_ex.fe_group; in ext4_mb_regular_allocator()
2857 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; in ext4_mb_regular_allocator()
2878 prefetch_ios < sbi->s_mb_prefetch_limit)) { in ext4_mb_regular_allocator()
2879 nr = sbi->s_mb_prefetch; in ext4_mb_regular_allocator()
2881 nr = 1 << sbi->s_log_groups_per_flex; in ext4_mb_regular_allocator()
2882 nr -= group & (nr - 1); in ext4_mb_regular_allocator()
2883 nr = min(nr, sbi->s_mb_prefetch); in ext4_mb_regular_allocator()
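/*
 * The arithmetic above clips the prefetch window to the end of the current
 * flex group. E.g. with s_log_groups_per_flex = 4 (16 groups per flex
 * group) and group = 21: nr = 16 - (21 & 15) = 11, i.e. prefetch groups
 * 21..31, further capped by s_mb_prefetch.
 */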
2914 ac->ac_groups_scanned++; in ext4_mb_regular_allocator()
2918 bool is_stripe_aligned = sbi->s_stripe && in ext4_mb_regular_allocator()
2919 !(ac->ac_g_ex.fe_len % in ext4_mb_regular_allocator()
2920 EXT4_B2C(sbi, sbi->s_stripe)); in ext4_mb_regular_allocator()
2927 if (ac->ac_status == AC_STATUS_CONTINUE) in ext4_mb_regular_allocator()
2934 if (ac->ac_status != AC_STATUS_CONTINUE) in ext4_mb_regular_allocator()
2938 if (sbi->s_mb_stats && i == ngroups) in ext4_mb_regular_allocator()
2939 atomic64_inc(&sbi->s_bal_cX_failed[cr]); in ext4_mb_regular_allocator()
2941 if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) in ext4_mb_regular_allocator()
2944 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; in ext4_mb_regular_allocator()
2947 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && in ext4_mb_regular_allocator()
2948 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { in ext4_mb_regular_allocator()
2954 if (ac->ac_status != AC_STATUS_FOUND) { in ext4_mb_regular_allocator()
2960 lost = atomic_inc_return(&sbi->s_mb_lost_chunks); in ext4_mb_regular_allocator()
2962 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, in ext4_mb_regular_allocator()
2963 ac->ac_b_ex.fe_len, lost); in ext4_mb_regular_allocator()
2965 ac->ac_b_ex.fe_group = 0; in ext4_mb_regular_allocator()
2966 ac->ac_b_ex.fe_start = 0; in ext4_mb_regular_allocator()
2967 ac->ac_b_ex.fe_len = 0; in ext4_mb_regular_allocator()
2968 ac->ac_status = AC_STATUS_CONTINUE; in ext4_mb_regular_allocator()
2969 ac->ac_flags |= EXT4_MB_HINT_FIRST; in ext4_mb_regular_allocator()
2975 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) in ext4_mb_regular_allocator()
2976 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); in ext4_mb_regular_allocator()
2978 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) in ext4_mb_regular_allocator()
2982 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, in ext4_mb_regular_allocator()
2983 ac->ac_flags, cr, err); in ext4_mb_regular_allocator()
2993 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_start()
3004 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_next()
3016 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_groups_show()
3023 sb->s_blocksize_bits, in ext4_mb_seq_groups_show()
3030 group--; in ext4_mb_seq_groups_show()
3046 seq_printf(seq, "#%-5u: I/O error\n", group); in ext4_mb_seq_groups_show()
3057 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, in ext4_mb_seq_groups_show()
3060 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? in ext4_mb_seq_groups_show()
3080 struct super_block *sb = seq->private; in ext4_seq_mb_stats_show()
3084 if (!sbi->s_mb_stats) { in ext4_seq_mb_stats_show()
3091 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); in ext4_seq_mb_stats_show()
3092 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); in ext4_seq_mb_stats_show()
3095 atomic_read(&sbi->s_bal_groups_scanned)); in ext4_seq_mb_stats_show()
3100 atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3104 &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3106 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3108 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); in ext4_seq_mb_stats_show()
3110 atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); in ext4_seq_mb_stats_show()
3115 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3118 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3120 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3122 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); in ext4_seq_mb_stats_show()
3124 atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); in ext4_seq_mb_stats_show()
3129 atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3133 &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3135 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3137 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); in ext4_seq_mb_stats_show()
3139 atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); in ext4_seq_mb_stats_show()
3144 atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3147 &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3149 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3151 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); in ext4_seq_mb_stats_show()
3156 atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3159 atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3161 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3163 atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); in ext4_seq_mb_stats_show()
3167 atomic_read(&sbi->s_bal_ex_scanned)); in ext4_seq_mb_stats_show()
3168 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); in ext4_seq_mb_stats_show()
3170 atomic_read(&sbi->s_bal_len_goals)); in ext4_seq_mb_stats_show()
3171 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); in ext4_seq_mb_stats_show()
3172 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); in ext4_seq_mb_stats_show()
3173 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); in ext4_seq_mb_stats_show()
3175 atomic_read(&sbi->s_mb_buddies_generated), in ext4_seq_mb_stats_show()
3178 atomic64_read(&sbi->s_mb_generation_time)); in ext4_seq_mb_stats_show()
3180 atomic_read(&sbi->s_mb_preallocated)); in ext4_seq_mb_stats_show()
3181 seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); in ext4_seq_mb_stats_show()
3186 __acquires(&EXT4_SB(sb)->s_mb_rb_lock) in ext4_mb_seq_structs_summary_start()
3188 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_start()
3199 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_next()
3211 struct super_block *sb = pde_data(file_inode(seq->file)); in ext4_mb_seq_structs_summary_show()
3217 position--; in ext4_mb_seq_structs_summary_show()
3219 position -= MB_NUM_ORDERS(sb); in ext4_mb_seq_structs_summary_show()
3224 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); in ext4_mb_seq_structs_summary_show()
3225 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], in ext4_mb_seq_structs_summary_show()
3228 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); in ext4_mb_seq_structs_summary_show()
3240 read_lock(&sbi->s_mb_largest_free_orders_locks[position]); in ext4_mb_seq_structs_summary_show()
3241 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], in ext4_mb_seq_structs_summary_show()
3244 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); in ext4_mb_seq_structs_summary_show()
3264 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; in get_groupinfo_cache()
3272 * Allocate the top-level s_group_info array for the specified number
3281 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> in ext4_mb_alloc_groupinfo()
3283 if (size <= sbi->s_group_info_size) in ext4_mb_alloc_groupinfo()
3286 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); in ext4_mb_alloc_groupinfo()
3290 return -ENOMEM; in ext4_mb_alloc_groupinfo()
3293 old_groupinfo = rcu_dereference(sbi->s_group_info); in ext4_mb_alloc_groupinfo()
3296 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); in ext4_mb_alloc_groupinfo()
3298 rcu_assign_pointer(sbi->s_group_info, new_groupinfo); in ext4_mb_alloc_groupinfo()
3299 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); in ext4_mb_alloc_groupinfo()
3303 sbi->s_group_info_size); in ext4_mb_alloc_groupinfo()
3316 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); in ext4_mb_add_groupinfo()
3330 return -ENOMEM; in ext4_mb_add_groupinfo()
3333 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; in ext4_mb_add_groupinfo()
3338 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); in ext4_mb_add_groupinfo()
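/*
 * Group infos live in a two-level table: the outer s_group_info array is
 * indexed by group / EXT4_DESC_PER_BLOCK(sb), the inner meta_group_info
 * block by the remainder computed above. E.g. assuming 4 KiB blocks and
 * 32-byte descriptors (EXT4_DESC_PER_BLOCK = 128), group 300 lands at
 * outer index 2, inner index 300 & 127 = 44.
 */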
3346 &(meta_group_info[i]->bb_state)); in ext4_mb_add_groupinfo()
3353 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { in ext4_mb_add_groupinfo()
3354 meta_group_info[i]->bb_free = in ext4_mb_add_groupinfo()
3357 meta_group_info[i]->bb_free = in ext4_mb_add_groupinfo()
3361 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); in ext4_mb_add_groupinfo()
3362 init_rwsem(&meta_group_info[i]->alloc_sem); in ext4_mb_add_groupinfo()
3363 meta_group_info[i]->bb_free_root = RB_ROOT; in ext4_mb_add_groupinfo()
3364 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); in ext4_mb_add_groupinfo()
3365 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); in ext4_mb_add_groupinfo()
3366 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3367 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ in ext4_mb_add_groupinfo()
3368 meta_group_info[i]->bb_group = group; in ext4_mb_add_groupinfo()
3379 group_info = rcu_dereference(sbi->s_group_info); in ext4_mb_add_groupinfo()
3384 return -ENOMEM; in ext4_mb_add_groupinfo()
3401 sbi->s_buddy_cache = new_inode(sb); in ext4_mb_init_backend()
3402 if (sbi->s_buddy_cache == NULL) { in ext4_mb_init_backend()
3406 /* To avoid potentially colliding with a valid on-disk inode number, in ext4_mb_init_backend()
3410 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; in ext4_mb_init_backend()
3411 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; in ext4_mb_init_backend()
3428 if (sbi->s_es->s_log_groups_per_flex >= 32) { in ext4_mb_init_backend()
3432 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, in ext4_mb_init_backend()
3433 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); in ext4_mb_init_backend()
3434 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ in ext4_mb_init_backend()
3436 sbi->s_mb_prefetch = 32; in ext4_mb_init_backend()
3438 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) in ext4_mb_init_backend()
3439 sbi->s_mb_prefetch = ext4_get_groups_count(sb); in ext4_mb_init_backend()
3441 * given cr=0 is a CPU-related optimization, we shouldn't try to in ext4_mb_init_backend()
3447 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; in ext4_mb_init_backend()
3448 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) in ext4_mb_init_backend()
3449 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); in ext4_mb_init_backend()
3454 cachep = get_groupinfo_cache(sb->s_blocksize_bits); in ext4_mb_init_backend()
3455 while (i-- > 0) { in ext4_mb_init_backend()
3461 i = sbi->s_group_info_size; in ext4_mb_init_backend()
3463 group_info = rcu_dereference(sbi->s_group_info); in ext4_mb_init_backend()
3464 while (i-- > 0) in ext4_mb_init_backend()
3467 iput(sbi->s_buddy_cache); in ext4_mb_init_backend()
3470 kvfree(rcu_dereference(sbi->s_group_info)); in ext4_mb_init_backend()
3472 return -ENOMEM; in ext4_mb_init_backend()
3490 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; in ext4_groupinfo_create_slab()
3494 return -EINVAL; in ext4_groupinfo_create_slab()
3517 "EXT4-fs: no memory for groupinfo slab cache\n"); in ext4_groupinfo_create_slab()
3518 return -ENOMEM; in ext4_groupinfo_create_slab()
3528 struct super_block *sb = sbi->s_sb; in ext4_discard_work()
3535 spin_lock(&sbi->s_md_lock); in ext4_discard_work()
3536 list_splice_init(&sbi->s_discard_list, &discard_list); in ext4_discard_work()
3537 spin_unlock(&sbi->s_md_lock); in ext4_discard_work()
3545 if ((sb->s_flags & SB_ACTIVE) && !err && in ext4_discard_work()
3546 !atomic_read(&sbi->s_retry_alloc_pending)) { in ext4_discard_work()
3547 grp = fd->efd_group; in ext4_discard_work()
3563 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, in ext4_discard_work()
3564 fd->efd_start_cluster + fd->efd_count - 1, 1); in ext4_discard_work()
3582 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); in ext4_mb_init()
3584 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); in ext4_mb_init()
3585 if (sbi->s_mb_offsets == NULL) { in ext4_mb_init()
3586 ret = -ENOMEM; in ext4_mb_init()
3590 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); in ext4_mb_init()
3591 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); in ext4_mb_init()
3592 if (sbi->s_mb_maxs == NULL) { in ext4_mb_init()
3593 ret = -ENOMEM; in ext4_mb_init()
3597 ret = ext4_groupinfo_create_slab(sb->s_blocksize); in ext4_mb_init()
3602 sbi->s_mb_maxs[0] = sb->s_blocksize << 3; in ext4_mb_init()
3603 sbi->s_mb_offsets[0] = 0; in ext4_mb_init()
3607 offset_incr = 1 << (sb->s_blocksize_bits - 1); in ext4_mb_init()
3608 max = sb->s_blocksize << 2; in ext4_mb_init()
3610 sbi->s_mb_offsets[i] = offset; in ext4_mb_init()
3611 sbi->s_mb_maxs[i] = max; in ext4_mb_init()
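/*
 * Worked values for the table built above with 4 KiB blocks
 * (s_blocksize_bits = 12), assuming the loop advances offset by offset_incr
 * and halves offset_incr and max each pass. Order 0 is the cluster bitmap
 * itself, maxs[0] = 32768 bits; the buddy area then packs one bitmap per
 * order:
 *
 *	offsets[1] =    0 bytes, maxs[1] = 16384 bits
 *	offsets[2] = 2048 bytes, maxs[2] =  8192 bits
 *	offsets[3] = 3072 bytes, maxs[3] =  4096 bits
 *	...
 *
 * which is how mb_find_buddy() earlier in this file locates
 * bd_buddy + s_mb_offsets[order].
 */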
3618 sbi->s_mb_avg_fragment_size =
3621 if (!sbi->s_mb_avg_fragment_size) {
3622 ret = -ENOMEM;
3625 sbi->s_mb_avg_fragment_size_locks =
3628 if (!sbi->s_mb_avg_fragment_size_locks) {
3629 ret = -ENOMEM;
3633 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3634 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3636 sbi->s_mb_largest_free_orders =
3639 if (!sbi->s_mb_largest_free_orders) {
3640 ret = -ENOMEM;
3643 sbi->s_mb_largest_free_orders_locks =
3646 if (!sbi->s_mb_largest_free_orders_locks) {
3647 ret = -ENOMEM;
3651 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3652 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3655 spin_lock_init(&sbi->s_md_lock);
3656 sbi->s_mb_free_pending = 0;
3657 INIT_LIST_HEAD(&sbi->s_freed_data_list[0]);
3658 INIT_LIST_HEAD(&sbi->s_freed_data_list[1]);
3659 INIT_LIST_HEAD(&sbi->s_discard_list);
3660 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3661 atomic_set(&sbi->s_retry_alloc_pending, 0);
3663 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3664 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3665 sbi->s_mb_stats = MB_DEFAULT_STATS;
3666 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3667 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3668 sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER;
3682 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3683 sbi->s_cluster_bits, 32);
3692 if (sbi->s_stripe > 1) {
3693 sbi->s_mb_group_prealloc = roundup(
3694 sbi->s_mb_group_prealloc, EXT4_B2C(sbi, sbi->s_stripe));
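
/*
 * Illustrative numbers, not from mballoc.c: with the usual
 * MB_DEFAULT_GROUP_PREALLOC of 512 blocks, a non-bigalloc file system
 * (s_cluster_bits == 0) gets s_mb_group_prealloc = max(512, 32) = 512
 * clusters, while bigalloc with 64 KiB clusters on 4 KiB blocks
 * (s_cluster_bits == 4) gets max(512 >> 4, 32) = 32. A RAID stripe of
 * 24 blocks in the non-bigalloc case would then round 512 up to 528.
 */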
3697 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3698 if (sbi->s_locality_groups == NULL) {
3699 ret = -ENOMEM;
3704 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3705 mutex_init(&lg->lg_mutex);
3707 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3708 spin_lock_init(&lg->lg_prealloc_lock);
3711 if (bdev_nonrot(sb->s_bdev))
3712 sbi->s_mb_max_linear_groups = 0;
3714 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3723 free_percpu(sbi->s_locality_groups);
3724 sbi->s_locality_groups = NULL;
3726 kfree(sbi->s_mb_avg_fragment_size);
3727 kfree(sbi->s_mb_avg_fragment_size_locks);
3728 kfree(sbi->s_mb_largest_free_orders);
3729 kfree(sbi->s_mb_largest_free_orders_locks);
3730 kfree(sbi->s_mb_offsets);
3731 sbi->s_mb_offsets = NULL;
3732 kfree(sbi->s_mb_maxs);
3733 sbi->s_mb_maxs = NULL;

/* in ext4_mb_cleanup_pa() */
3744 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3746 list_del(&pa->pa_group_list);

/* in ext4_mb_release() */
3760 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3767 flush_work(&sbi->s_discard_work);
3768 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3771 if (sbi->s_group_info) {
3787 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3790 group_info = rcu_dereference(sbi->s_group_info);
3796 kfree(sbi->s_mb_avg_fragment_size);
3797 kfree(sbi->s_mb_avg_fragment_size_locks);
3798 kfree(sbi->s_mb_largest_free_orders);
3799 kfree(sbi->s_mb_largest_free_orders_locks);
3800 kfree(sbi->s_mb_offsets);
3801 kfree(sbi->s_mb_maxs);
3802 iput(sbi->s_buddy_cache);
3803 if (sbi->s_mb_stats) {
3806 atomic_read(&sbi->s_bal_allocated),
3807 atomic_read(&sbi->s_bal_reqs),
3808 atomic_read(&sbi->s_bal_success));
3812 atomic_read(&sbi->s_bal_ex_scanned),
3813 atomic_read(&sbi->s_bal_groups_scanned),
3814 atomic_read(&sbi->s_bal_goals),
3815 atomic_read(&sbi->s_bal_2orders),
3816 atomic_read(&sbi->s_bal_breaks),
3817 atomic_read(&sbi->s_mb_lost_chunks));
3820 atomic_read(&sbi->s_mb_buddies_generated),
3821 atomic64_read(&sbi->s_mb_generation_time));
3824 atomic_read(&sbi->s_mb_preallocated),
3825 atomic_read(&sbi->s_mb_discarded));
3828 free_percpu(sbi->s_locality_groups);

/* in ext4_issue_discard() */
3843 return __blkdev_issue_discard(sb->s_bdev,
3844 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3845 (sector_t)count << (sb->s_blocksize_bits - 9),

/* in ext4_free_data_in_buddy() */
3859 entry->efd_count, entry->efd_group, entry);
3861 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3865 spin_lock(&EXT4_SB(sb)->s_md_lock);
3866 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3867 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3871 count += entry->efd_count;
3872 ext4_lock_group(sb, entry->efd_group);
3874 rb_erase(&entry->efd_node, &(db->bb_free_root));
3875 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3880 * If the volume is mounted with -o discard, online discard
3886 if (!db->bb_free_root.rb_node) {
3893 ext4_unlock_group(sb, entry->efd_group);

/* in ext4_process_freed_data() */
3908 struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1];
3917 spin_lock(&sbi->s_md_lock);
3918 wake = list_empty(&sbi->s_discard_list);
3919 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3920 spin_unlock(&sbi->s_md_lock);
3922 queue_work(system_unbound_wq, &sbi->s_discard_work);

/* in ext4_init_mballoc() */
3953 return -ENOMEM;

/* in ext4_mb_mark_context() */
4001 err = -EIO;
4016 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
4017 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
4025 if (mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
4028 changed = len - already;
4032 mb_set_bits(bitmap_bh->b_data, blkoff, len);
4034 ext4_free_group_clusters(sb, gdp) - changed);
4036 mb_clear_bits(bitmap_bh->b_data, blkoff, len);
4047 if (sbi->s_log_groups_per_flex) {
4053 atomic64_sub(changed, &fg->free_clusters);
4055 atomic64_add(changed, &fg->free_clusters);
/* in ext4_mb_mark_diskspace_used() */
4076 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
4091 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4092 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4094 sb = ac->ac_sb;
4097 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL);
4099 return -EIO;
4100 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
4103 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4104 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4105 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
4106 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
4113 ac->ac_b_ex.fe_group,
4114 ac->ac_b_ex.fe_start,
4115 ac->ac_b_ex.fe_len,
4118 err = -EFSCORRUPTED;
4125 err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group,
4126 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len,
4133 BUG_ON(changed != ac->ac_b_ex.fe_len);
4135 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
4139 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
4141 percpu_counter_sub(&sbi->s_dirtyclusters_counter,

/* in ext4_mb_mark_bb() */
4172 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
4176 ext4_error(sb, "Marking blocks in system zone - "
4191 len -= thisgrp_len;

/* in ext4_mb_normalize_group_request() */
4197 * here we normalize request for locality group
4207 struct super_block *sb = ac->ac_sb;
4208 struct ext4_locality_group *lg = ac->ac_lg;
4211 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4212 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);

/* in ext4_mb_pa_rb_next_iter() */
4218 * (ei->i_prealloc_lock)
4228 return node->rb_left;
4230 return node->rb_right;
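
/*
 * Reading the elided helper above: it apparently compares the target
 * logical block against the current PA's pa_lstart and returns
 * node->rb_left when the target sorts lower, node->rb_right otherwise,
 * so loops of the form
 *	for (iter = root; iter;
 *	     iter = ext4_mb_pa_rb_next_iter(target, tmp_pa->pa_lstart, iter))
 * walk the inode PA rbtree as an ordinary binary search keyed on
 * pa_lstart.
 */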
/* in ext4_mb_pa_assert_overlap() */
4237 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4238 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4244 read_lock(&ei->i_prealloc_lock);
4245 for (iter = ei->i_prealloc_node.rb_node; iter;
4249 tmp_pa_start = tmp_pa->pa_lstart;
4252 spin_lock(&tmp_pa->pa_lock);
4253 if (tmp_pa->pa_deleted == 0)
4255 spin_unlock(&tmp_pa->pa_lock);
4257 read_unlock(&ei->i_prealloc_lock);

/* in ext4_mb_pa_adjust_overlap() */
4274 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4275 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4278 ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
4279 loff_t new_end, tmp_pa_end, left_pa_end = -1;
4289 read_lock(&ei->i_prealloc_lock);
4292 for (iter = ei->i_prealloc_node.rb_node; iter;
4293 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4297 tmp_pa_start = tmp_pa->pa_lstart;
4301 spin_lock(&tmp_pa->pa_lock);
4302 if (tmp_pa->pa_deleted == 0)
4303 BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end ||
4304 ac->ac_o_ex.fe_logical < tmp_pa_start));
4305 spin_unlock(&tmp_pa->pa_lock);
4313 if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) {
4317 tmp = rb_next(&left_pa->pa_node.inode_node);
4327 tmp = rb_prev(&right_pa->pa_node.inode_node);
4338 for (iter = &left_pa->pa_node.inode_node;;
4348 spin_lock(&tmp_pa->pa_lock);
4349 if (tmp_pa->pa_deleted == 0) {
4350 spin_unlock(&tmp_pa->pa_lock);
4353 spin_unlock(&tmp_pa->pa_lock);
4358 for (iter = &right_pa->pa_node.inode_node;;
4368 spin_lock(&tmp_pa->pa_lock);
4369 if (tmp_pa->pa_deleted == 0) {
4370 spin_unlock(&tmp_pa->pa_lock);
4373 spin_unlock(&tmp_pa->pa_lock);
4379 BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
4383 right_pa_start = right_pa->pa_lstart;
4384 BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical);
4397 read_unlock(&ei->i_prealloc_lock);

/* in ext4_mb_normalize_request() */
4414 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4415 struct ext4_super_block *es = sbi->s_es;
4423 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4427 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4432 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4435 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4440 bsbits = ac->ac_sb->s_blocksize_bits;
4444 size = extent_logical_end(sbi, &ac->ac_o_ex);
4446 if (size < i_size_read(ac->ac_inode))
4447 size = i_size_read(ac->ac_inode);
4474 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4475 (21 - bsbits)) << 21;
4478 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4479 (22 - bsbits)) << 22;
4481 } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len),
4483 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4484 (23 - bsbits)) << 23;
4487 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4489 ac->ac_o_ex.fe_len) << bsbits;
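
/*
 * Illustrative, assuming 4 KiB blocks (bsbits == 12): the first branch
 * above computes start_off = (fe_logical >> 9) << 21, i.e. it converts
 * the logical block number to a byte offset and rounds it down to a
 * 2 MiB boundary; the following branches do the same at 4 MiB (shift
 * 22) and 8 MiB (shift 23), so preallocation windows for larger files
 * stay naturally aligned.
 */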
4500 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4501 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4505 size = EXT_MAX_BLOCKS - start;
4508 if (ar->pleft && start <= ar->lleft) {
4509 size -= ar->lleft + 1 - start;
4510 start = ar->lleft + 1;
4512 if (ar->pright && start + size - 1 >= ar->lright)
4513 size -= start + size - ar->lright;
4519 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4520 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4526 size = end - start;
4532 * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and
4538 * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated
4543 if (start + size <= ac->ac_o_ex.fe_logical ||
4544 start > ac->ac_o_ex.fe_logical) {
4545 ext4_msg(ac->ac_sb, KERN_ERR,
4548 (unsigned long) ac->ac_o_ex.fe_logical);
4551 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4557 ac->ac_g_ex.fe_logical = start;
4558 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4559 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
4562 if (ar->pright && (ar->lright == (start + size)) &&
4563 ar->pright >= size &&
4564 ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
4566 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4567 &ac->ac_g_ex.fe_group,
4568 &ac->ac_g_ex.fe_start);
4569 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4571 if (ar->pleft && (ar->lleft + 1 == start) &&
4572 ar->pleft + 1 < ext4_blocks_count(es)) {
4574 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4575 &ac->ac_g_ex.fe_group,
4576 &ac->ac_g_ex.fe_start);
4577 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4580 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
/* in ext4_mb_collect_stats() */
4586 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4588 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4589 atomic_inc(&sbi->s_bal_reqs);
4590 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4591 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4592 atomic_inc(&sbi->s_bal_success);
4594 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4596 atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]);
4599 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4600 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4601 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4602 atomic_inc(&sbi->s_bal_goals);
4604 if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len)
4605 atomic_inc(&sbi->s_bal_len_goals);
4607 if (ac->ac_found > sbi->s_mb_max_to_scan)
4608 atomic_inc(&sbi->s_bal_breaks);
4611 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)

/* in ext4_discard_allocated_blocks() */
4621 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4625 struct ext4_prealloc_space *pa = ac->ac_pa;
4630 if (ac->ac_f_ex.fe_len == 0)
4632 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4641 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4642 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4643 ac->ac_f_ex.fe_len);
4644 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4648 if (pa->pa_type == MB_INODE_PA) {
4649 spin_lock(&pa->pa_lock);
4650 pa->pa_free += ac->ac_b_ex.fe_len;
4651 spin_unlock(&pa->pa_lock);

/* in ext4_mb_use_inode_pa() */
4661 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4667 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4668 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4669 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4670 len = EXT4_NUM_B2C(sbi, end - start);
4671 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4672 &ac->ac_b_ex.fe_start);
4673 ac->ac_b_ex.fe_len = len;
4674 ac->ac_status = AC_STATUS_FOUND;
4675 ac->ac_pa = pa;
4677 BUG_ON(start < pa->pa_pstart);
4678 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4679 BUG_ON(pa->pa_free < len);
4680 BUG_ON(ac->ac_b_ex.fe_len <= 0);
4681 pa->pa_free -= len;
4683 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
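
/*
 * Illustrative only -- a userspace re-statement of the mapping done in
 * ext4_mb_use_inode_pa() above, assuming a non-bigalloc file system
 * where EXT4_C2B()/EXT4_NUM_B2C() are identity. pa_map() is a
 * hypothetical name for this sketch, not a kernel helper.
 */
#include <stdio.h>

static unsigned long long pa_map(unsigned long long pa_pstart,
				 unsigned long long pa_lstart,
				 unsigned int pa_len,
				 unsigned long long logical,
				 unsigned int req_len, unsigned int *len)
{
	unsigned long long start = pa_pstart + (logical - pa_lstart);
	unsigned long long end = pa_pstart + pa_len;

	if (start + req_len < end)
		end = start + req_len;
	*len = end - start;
	return start;
}

int main(void)
{
	unsigned int len;
	/* PA: logical 100..163 -> physical 5000..5063; request: 16 @ 110 */
	unsigned long long start = pa_map(5000, 100, 64, 110, 16, &len);

	printf("physical start %llu, len %u\n", start, len);	/* 5010, 16 */
	return 0;
}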
/* in ext4_mb_use_group_pa() */
4687 * use blocks preallocated to locality group
4692 unsigned int len = ac->ac_o_ex.fe_len;
4694 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4695 &ac->ac_b_ex.fe_group,
4696 &ac->ac_b_ex.fe_start);
4697 ac->ac_b_ex.fe_len = len;
4698 ac->ac_status = AC_STATUS_FOUND;
4699 ac->ac_pa = pa;
4704 * in on-disk bitmap -- see ext4_mb_release_context()
4707 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4708 pa->pa_lstart, len, pa);

/* in ext4_mb_check_group_pa() */
4725 atomic_inc(&pa->pa_count);
4728 cur_distance = abs(goal_block - cpa->pa_pstart);
4729 new_distance = abs(goal_block - pa->pa_pstart);
4735 atomic_dec(&cpa->pa_count);
4736 atomic_inc(&pa->pa_count);
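
/*
 * Illustrative numbers: with goal_block == 1000, a current candidate at
 * cpa->pa_pstart == 900 (distance 100) loses to a new PA at
 * pa->pa_pstart == 1040 (distance 40), so the reference taken above
 * migrates from cpa to pa; on a tie, the elided comparison appears to
 * keep the current candidate.
 */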
/* in ext4_mb_pa_goal_check() */
4747 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4750 if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)))
4759 start = pa->pa_pstart +
4760 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4761 if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start)
4764 if (ac->ac_g_ex.fe_len > pa->pa_len -
4765 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))

/* in ext4_mb_use_preallocated() */
4777 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4779 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4786 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4790 * first, try per-file preallocation by searching the inode pa rbtree.
4796 read_lock(&ei->i_prealloc_lock);
4798 if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) {
4806 * (tmp_pa->pa_lstart never changes so we can skip locking for it).
4808 for (iter = ei->i_prealloc_node.rb_node; iter;
4809 iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical,
4810 tmp_pa->pa_lstart, iter)) {
4820 if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4822 tmp = rb_prev(&tmp_pa->pa_node.inode_node);
4837 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4844 for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) {
4854 spin_lock(&tmp_pa->pa_lock);
4855 if (tmp_pa->pa_deleted == 0) {
4865 spin_unlock(&tmp_pa->pa_lock);
4869 BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical));
4870 BUG_ON(tmp_pa->pa_deleted == 1);
4877 if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) {
4878 spin_unlock(&tmp_pa->pa_lock);
4882 /* non-extent files can't have physical blocks past 2^32 */
4883 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4884 (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) >
4890 spin_unlock(&tmp_pa->pa_lock);
4894 if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) {
4895 atomic_inc(&tmp_pa->pa_count);
4897 spin_unlock(&tmp_pa->pa_lock);
4898 read_unlock(&ei->i_prealloc_lock);
4928 WARN_ON_ONCE(tmp_pa->pa_free == 0);
4930 spin_unlock(&tmp_pa->pa_lock);
4932 read_unlock(&ei->i_prealloc_lock);
4935 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4938 /* inode may have no locality group for some reason */
4939 lg = ac->ac_lg;
4942 order = fls(ac->ac_o_ex.fe_len) - 1;
4943 if (order > PREALLOC_TB_SIZE - 1)
4945 order = PREALLOC_TB_SIZE - 1;
4947 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4954 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i],
4956 spin_lock(&tmp_pa->pa_lock);
4957 if (tmp_pa->pa_deleted == 0 &&
4958 tmp_pa->pa_free >= ac->ac_o_ex.fe_len) {
4963 spin_unlock(&tmp_pa->pa_lock);
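
/*
 * Illustrative, assuming the usual PREALLOC_TB_SIZE of 10: a request
 * for 200 clusters gives order = fls(200) - 1 = 7, so the scan above
 * starts at lg_prealloc_list[7]; requests of 1024 clusters or more are
 * clamped into the last bucket, lg_prealloc_list[9].
 */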
/* in ext4_mb_generate_from_pa() */
4976 * used in in-core bitmap. buddy must be generated from this bitmap
5002 list_for_each(cur, &grp->bb_prealloc_list) {
5004 spin_lock(&pa->pa_lock);
5005 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5007 len = pa->pa_len;
5008 spin_unlock(&pa->pa_lock);

/* in ext4_mb_mark_pa_deleted() */
5023 if (pa->pa_deleted) {
5025 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5026 pa->pa_len);
5030 pa->pa_deleted = 1;
5032 if (pa->pa_type == MB_INODE_PA) {
5033 ei = EXT4_I(pa->pa_inode);
5034 atomic_dec(&ei->i_prealloc_active);

/* in ext4_mb_pa_free() */
5041 BUG_ON(atomic_read(&pa->pa_count));
5042 BUG_ON(pa->pa_deleted == 0);

/* in ext4_mb_put_pa() */
5063 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
5066 spin_lock(&pa->pa_lock);
5067 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5068 spin_unlock(&pa->pa_lock);
5072 if (pa->pa_deleted == 1) {
5073 spin_unlock(&pa->pa_lock);
5078 spin_unlock(&pa->pa_lock);
5080 grp_blk = pa->pa_pstart;
5082 * If doing group-based preallocation, pa_pstart may be in the
5085 if (pa->pa_type == MB_GROUP_PA)
5086 grp_blk--;
5095 * copy on-disk bitmap to buddy
5096 * mark B in on-disk bitmap
5105 list_del(&pa->pa_group_list);
5108 if (pa->pa_type == MB_INODE_PA) {
5109 write_lock(pa->pa_node_lock.inode_lock);
5110 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5111 write_unlock(pa->pa_node_lock.inode_lock);
5114 spin_lock(pa->pa_node_lock.lg_lock);
5115 list_del_rcu(&pa->pa_node.lg_list);
5116 spin_unlock(pa->pa_node_lock.lg_lock);
5117 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);

/* in ext4_mb_pa_rb_insert() */
5123 struct rb_node **iter = &root->rb_node, *parent = NULL;
5132 iter_start = iter_pa->pa_lstart;
5133 new_start = new_pa->pa_lstart;
5137 iter = &((*iter)->rb_left);
5139 iter = &((*iter)->rb_right);

/* in ext4_mb_new_inode_pa() */
5152 struct super_block *sb = ac->ac_sb;
5159 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5160 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5161 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5162 BUG_ON(ac->ac_pa == NULL);
5164 pa = ac->ac_pa;
5166 if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) {
5168 .fe_logical = ac->ac_g_ex.fe_logical,
5169 .fe_len = ac->ac_orig_goal_len,
5176 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
5177 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
5190 ex.fe_len = ac->ac_b_ex.fe_len;
5192 ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
5193 if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
5196 ex.fe_logical = ac->ac_g_ex.fe_logical;
5197 if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
5200 ex.fe_logical = ac->ac_o_ex.fe_logical;
5202 ac->ac_b_ex.fe_logical = ex.fe_logical;
5204 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
5205 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
5209 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5210 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5211 pa->pa_len = ac->ac_b_ex.fe_len;
5212 pa->pa_free = pa->pa_len;
5213 spin_lock_init(&pa->pa_lock);
5214 INIT_LIST_HEAD(&pa->pa_group_list);
5215 pa->pa_deleted = 0;
5216 pa->pa_type = MB_INODE_PA;
5218 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5219 pa->pa_len, pa->pa_lstart);
5222 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5225 ei = EXT4_I(ac->ac_inode);
5226 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5230 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5231 pa->pa_inode = ac->ac_inode;
5233 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5235 write_lock(pa->pa_node_lock.inode_lock);
5236 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5237 write_unlock(pa->pa_node_lock.inode_lock);
5238 atomic_inc(&ei->i_prealloc_active);
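
/*
 * Summarizing the linkage just performed (no new behavior): every inode
 * PA sits on two structures at once -- the per-group bb_prealloc_list,
 * taken under the group lock so bitmap regeneration can find it, and
 * the per-inode i_prealloc_node rbtree, taken under i_prealloc_lock so
 * allocations against this inode can find it by logical block.
 */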
/* in ext4_mb_new_group_pa() */
5242 * creates new preallocated space for the locality group this inode belongs to
5247 struct super_block *sb = ac->ac_sb;
5253 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
5254 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
5255 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
5256 BUG_ON(ac->ac_pa == NULL);
5258 pa = ac->ac_pa;
5260 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5261 pa->pa_lstart = pa->pa_pstart;
5262 pa->pa_len = ac->ac_b_ex.fe_len;
5263 pa->pa_free = pa->pa_len;
5264 spin_lock_init(&pa->pa_lock);
5265 INIT_LIST_HEAD(&pa->pa_node.lg_list);
5266 INIT_LIST_HEAD(&pa->pa_group_list);
5267 pa->pa_deleted = 0;
5268 pa->pa_type = MB_GROUP_PA;
5270 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5271 pa->pa_len, pa->pa_lstart);
5275 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5277 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
5280 lg = ac->ac_lg;
5283 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5284 pa->pa_inode = NULL;
5286 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);

/* in ext4_mb_new_preallocation() */
5296 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)

/* in ext4_mb_release_inode_pa() */
5303 * finds all unused blocks in on-disk bitmap, frees them in
5304 * in-core bitmap and buddy.
5308 * TODO: optimize the case when there are no in-core structures yet
5314 struct super_block *sb = e4b->bd_sb;
5323 BUG_ON(pa->pa_deleted == 0);
5324 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5325 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5326 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5327 end = bit + pa->pa_len;
5330 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
5333 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
5336 (unsigned) next - bit, (unsigned) group);
5337 free += next - bit;
5339 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
5342 next - bit);
5343 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5346 if (free != pa->pa_free) {
5347 ext4_msg(e4b->bd_sb, KERN_CRIT,
5349 pa, (unsigned long) pa->pa_lstart,
5350 (unsigned long) pa->pa_pstart,
5351 pa->pa_len);
5353 free, pa->pa_free);
5359 atomic_add(free, &sbi->s_mb_discarded);

/* in ext4_mb_release_group_pa() */
5366 struct super_block *sb = e4b->bd_sb;
5371 BUG_ON(pa->pa_deleted == 0);
5372 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5373 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5375 e4b->bd_group, group, pa->pa_pstart);
5378 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5379 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5380 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);

/* in ext4_mb_discard_group_preallocations() */
5387 * - when do we discard
5389 * - how many do we discard
5408 if (list_empty(&grp->bb_prealloc_list))
5414 ext4_error_err(sb, -err,
5430 &grp->bb_prealloc_list, pa_group_list) {
5431 spin_lock(&pa->pa_lock);
5432 if (atomic_read(&pa->pa_count)) {
5433 spin_unlock(&pa->pa_lock);
5437 if (pa->pa_deleted) {
5438 spin_unlock(&pa->pa_lock);
5449 free += pa->pa_free;
5451 spin_unlock(&pa->pa_lock);
5453 list_del(&pa->pa_group_list);
5454 list_add(&pa->u.pa_tmp_list, &list);
5460 /* remove from object (inode or locality group) */
5461 if (pa->pa_type == MB_GROUP_PA) {
5462 spin_lock(pa->pa_node_lock.lg_lock);
5463 list_del_rcu(&pa->pa_node.lg_list);
5464 spin_unlock(pa->pa_node_lock.lg_lock);
5466 write_lock(pa->pa_node_lock.inode_lock);
5467 ei = EXT4_I(pa->pa_inode);
5468 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5469 write_unlock(pa->pa_node_lock.inode_lock);
5472 list_del(&pa->u.pa_tmp_list);
5474 if (pa->pa_type == MB_GROUP_PA) {
5476 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5488 free, group, grp->bb_free);
/* in ext4_discard_preallocations() */
5493 * releases all unused preallocated blocks for the given inode
5504 struct super_block *sb = inode->i_sb;
5513 if (!S_ISREG(inode->i_mode))
5516 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
5520 inode->i_ino);
5522 atomic_read(&ei->i_prealloc_active));
5526 write_lock(&ei->i_prealloc_lock);
5527 for (iter = rb_first(&ei->i_prealloc_node); iter;
5531 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5533 spin_lock(&pa->pa_lock);
5534 if (atomic_read(&pa->pa_count)) {
5535 /* this shouldn't happen often - nobody should
5537 spin_unlock(&pa->pa_lock);
5538 write_unlock(&ei->i_prealloc_lock);
5540 "uh-oh! used pa while discarding");
5546 if (pa->pa_deleted == 0) {
5548 spin_unlock(&pa->pa_lock);
5549 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5550 list_add(&pa->u.pa_tmp_list, &list);
5555 spin_unlock(&pa->pa_lock);
5556 write_unlock(&ei->i_prealloc_lock);
5561 * ->clear_inode() the inode will get freed
5564 * freed memory, bad-bad-bad */
5568 * of ->clear_inode(), but not in case of
5573 write_unlock(&ei->i_prealloc_lock);
5576 BUG_ON(pa->pa_type != MB_INODE_PA);
5577 group = ext4_get_group_number(sb, pa->pa_pstart);
5582 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5590 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5597 list_del(&pa->pa_group_list);
5604 list_del(&pa->u.pa_tmp_list);
/* in ext4_mb_pa_alloc() */
5616 return -ENOMEM;
5617 atomic_set(&pa->pa_count, 1);
5618 ac->ac_pa = pa;

/* in ext4_mb_pa_put_free() */
5624 struct ext4_prealloc_space *pa = ac->ac_pa;
5627 ac->ac_pa = NULL;
5628 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5632 * been added to grp->bb_prealloc_list. So we don't need to lock it
5634 pa->pa_deleted = 1;

/* in ext4_mb_show_pa() */
5657 list_for_each(cur, &grp->bb_prealloc_list) {
5660 spin_lock(&pa->pa_lock);
5661 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5663 spin_unlock(&pa->pa_lock);
5665 pa->pa_len);
5668 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5669 grp->bb_fragments);

/* in ext4_mb_show_ac() */
5675 struct super_block *sb = ac->ac_sb;
5683 ac->ac_status, ac->ac_flags);
5687 (unsigned long)ac->ac_o_ex.fe_group,
5688 (unsigned long)ac->ac_o_ex.fe_start,
5689 (unsigned long)ac->ac_o_ex.fe_len,
5690 (unsigned long)ac->ac_o_ex.fe_logical,
5691 (unsigned long)ac->ac_g_ex.fe_group,
5692 (unsigned long)ac->ac_g_ex.fe_start,
5693 (unsigned long)ac->ac_g_ex.fe_len,
5694 (unsigned long)ac->ac_g_ex.fe_logical,
5695 (unsigned long)ac->ac_b_ex.fe_group,
5696 (unsigned long)ac->ac_b_ex.fe_start,
5697 (unsigned long)ac->ac_b_ex.fe_len,
5698 (unsigned long)ac->ac_b_ex.fe_logical,
5699 (int)ac->ac_criteria);
5700 mb_debug(sb, "%u found", ac->ac_found);
5701 mb_debug(sb, "used pa: %s, ", ac->ac_pa ? "yes" : "no");
5702 if (ac->ac_pa)
5703 mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ?
5713 ext4_mb_show_pa(ac->ac_sb);
/* in ext4_mb_group_or_file() */
5718 * We use locality group preallocation for small files. The size of the
5726 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5727 int bsbits = ac->ac_sb->s_blocksize_bits;
5731 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5734 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5737 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5739 size = extent_logical_end(sbi, &ac->ac_o_ex);
5740 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5745 !inode_is_open_for_write(ac->ac_inode))
5750 if (size > sbi->s_mb_stream_request)
5755 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5757 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5761 BUG_ON(ac->ac_lg != NULL);
5763 * locality group prealloc space is per cpu. The reason for having
5764 * per cpu locality group is to reduce the contention between block
5767 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5770 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5773 mutex_lock(&ac->ac_lg->lg_mutex);
/* in ext4_mb_initialize_context() */
5780 struct super_block *sb = ar->inode->i_sb;
5782 struct ext4_super_block *es = sbi->s_es;
5789 len = ar->len;
5796 goal = ar->goal;
5797 if (goal < le32_to_cpu(es->s_first_data_block) ||
5799 goal = le32_to_cpu(es->s_first_data_block);
5803 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5804 ac->ac_status = AC_STATUS_CONTINUE;
5805 ac->ac_sb = sb;
5806 ac->ac_inode = ar->inode;
5807 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5808 ac->ac_o_ex.fe_group = group;
5809 ac->ac_o_ex.fe_start = block;
5810 ac->ac_o_ex.fe_len = len;
5811 ac->ac_g_ex = ac->ac_o_ex;
5812 ac->ac_orig_goal_len = ac->ac_g_ex.fe_len;
5813 ac->ac_flags = ar->flags;
5816 * locality group. this is a policy, actually */
5821 (unsigned) ar->len, (unsigned) ar->logical,
5822 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5823 (unsigned) ar->lleft, (unsigned) ar->pleft,
5824 (unsigned) ar->lright, (unsigned) ar->pright,
5825 inode_is_open_for_write(ar->inode) ? "" : "non-");

/* in ext4_mb_discard_lg_preallocations() */
5838 mb_debug(sb, "discard locality group preallocation\n");
5840 spin_lock(&lg->lg_prealloc_lock);
5841 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5843 lockdep_is_held(&lg->lg_prealloc_lock)) {
5844 spin_lock(&pa->pa_lock);
5845 if (atomic_read(&pa->pa_count)) {
5851 spin_unlock(&pa->pa_lock);
5854 if (pa->pa_deleted) {
5855 spin_unlock(&pa->pa_lock);
5859 BUG_ON(pa->pa_type != MB_GROUP_PA);
5863 spin_unlock(&pa->pa_lock);
5865 list_del_rcu(&pa->pa_node.lg_list);
5866 list_add(&pa->u.pa_tmp_list, &discard_list);
5868 total_entries--;
5879 spin_unlock(&lg->lg_prealloc_lock);
5884 group = ext4_get_group_number(sb, pa->pa_pstart);
5888 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5893 list_del(&pa->pa_group_list);
5898 list_del(&pa->u.pa_tmp_list);
5899 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);

/* in ext4_mb_add_n_trim() */
5915 struct super_block *sb = ac->ac_sb;
5916 struct ext4_locality_group *lg = ac->ac_lg;
5917 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5919 order = fls(pa->pa_free) - 1;
5920 if (order > PREALLOC_TB_SIZE - 1)
5922 order = PREALLOC_TB_SIZE - 1;
5924 spin_lock(&lg->lg_prealloc_lock);
5925 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5927 lockdep_is_held(&lg->lg_prealloc_lock)) {
5928 spin_lock(&tmp_pa->pa_lock);
5929 if (tmp_pa->pa_deleted) {
5930 spin_unlock(&tmp_pa->pa_lock);
5933 if (!added && pa->pa_free < tmp_pa->pa_free) {
5935 list_add_tail_rcu(&pa->pa_node.lg_list,
5936 &tmp_pa->pa_node.lg_list);
5943 spin_unlock(&tmp_pa->pa_lock);
5947 list_add_tail_rcu(&pa->pa_node.lg_list,
5948 &lg->lg_prealloc_list[order]);
5949 spin_unlock(&lg->lg_prealloc_lock);
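
/*
 * Illustrative: the insertion above keeps lg_prealloc_list[order]
 * sorted by ascending pa_free, so a group PA with pa_free == 40 is
 * linked in front of the first entry whose pa_free exceeds 40 and
 * falls through to the list tail when no larger entry exists.
 */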
/* in ext4_mb_release_context() */
5962 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5963 struct ext4_prealloc_space *pa = ac->ac_pa;
5965 if (pa->pa_type == MB_GROUP_PA) {
5967 spin_lock(&pa->pa_lock);
5968 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5969 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5970 pa->pa_free -= ac->ac_b_ex.fe_len;
5971 pa->pa_len -= ac->ac_b_ex.fe_len;
5972 spin_unlock(&pa->pa_lock);
5980 if (likely(pa->pa_free)) {
5981 spin_lock(pa->pa_node_lock.lg_lock);
5982 list_del_rcu(&pa->pa_node.lg_list);
5983 spin_unlock(pa->pa_node_lock.lg_lock);
5988 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5990 if (ac->ac_bitmap_page)
5991 put_page(ac->ac_bitmap_page);
5992 if (ac->ac_buddy_page)
5993 put_page(ac->ac_buddy_page);
5994 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5995 mutex_unlock(&ac->ac_lg->lg_mutex);

/* in ext4_mb_discard_preallocations() */
6014 needed -= ret;

/* in ext4_mb_discard_preallocations_should_retry() */
6033 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
6039 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
6040 ac->ac_flags |= EXT4_MB_STRICT_CHECK;

/* in ext4_mb_new_blocks_simple() */
6059 struct super_block *sb = ar->inode->i_sb;
6066 struct ext4_super_block *es = sbi->s_es;
6068 goal = ar->goal;
6069 if (goal < le32_to_cpu(es->s_first_data_block) ||
6071 goal = le32_to_cpu(es->s_first_data_block);
6073 ar->len = 0;
6075 for (nr = ext4_get_groups_count(sb); nr > 0; nr--) {
6084 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
6106 *errp = -ENOSPC;
6112 ar->len = 1;

/* in ext4_mb_new_blocks() */
6135 sb = ar->inode->i_sb;
6139 if (sbi->s_mount_state & EXT4_FC_REPLAY)
6143 if (ext4_is_quota_file(ar->inode))
6144 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
6146 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
6151 while (ar->len &&
6152 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
6156 ar->len = ar->len >> 1;
6158 if (!ar->len) {
6160 *errp = -ENOSPC;
6163 reserv_clstrs = ar->len;
6164 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
6165 dquot_alloc_block_nofail(ar->inode,
6166 EXT4_C2B(sbi, ar->len));
6168 while (ar->len &&
6169 dquot_alloc_block(ar->inode,
6170 EXT4_C2B(sbi, ar->len))) {
6172 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
6173 ar->len--;
6176 inquota = ar->len;
6177 if (ar->len == 0) {
6178 *errp = -EDQUOT;
6185 ar->len = 0;
6186 *errp = -ENOMEM;
6192 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
6195 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
6205 * pa allocated above is added to grp->bb_prealloc_list only
6207 * ac->ac_status == AC_STATUS_FOUND.
6208 * And error from above mean ac->ac_status != AC_STATUS_FOUND
6216 if (ac->ac_status == AC_STATUS_FOUND &&
6217 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
6220 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
6226 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
6227 ar->len = ac->ac_b_ex.fe_len;
6238 *errp = -ENOSPC;
6243 ac->ac_b_ex.fe_len = 0;
6244 ar->len = 0;
6250 if (inquota && ar->len < inquota)
6251 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
6252 if (!ar->len) {
6253 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
6255 percpu_counter_sub(&sbi->s_dirtyclusters_counter,

/* in ext4_try_merge_freed_extent() */
6274 if ((entry->efd_tid != new_entry->efd_tid) ||
6275 (entry->efd_group != new_entry->efd_group))
6277 if (entry->efd_start_cluster + entry->efd_count ==
6278 new_entry->efd_start_cluster) {
6279 new_entry->efd_start_cluster = entry->efd_start_cluster;
6280 new_entry->efd_count += entry->efd_count;
6281 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
6282 entry->efd_start_cluster) {
6283 new_entry->efd_count += entry->efd_count;
6286 spin_lock(&sbi->s_md_lock);
6287 list_del(&entry->efd_list);
6288 spin_unlock(&sbi->s_md_lock);
6289 rb_erase(&entry->efd_node, entry_rb_root);

/* in ext4_mb_free_metadata() */
6297 ext4_group_t group = e4b->bd_group;
6299 ext4_grpblk_t clusters = new_entry->efd_count;
6301 struct ext4_group_info *db = e4b->bd_info;
6302 struct super_block *sb = e4b->bd_sb;
6304 struct rb_node **n = &db->bb_free_root.rb_node, *node;
6308 BUG_ON(e4b->bd_bitmap_page == NULL);
6309 BUG_ON(e4b->bd_buddy_page == NULL);
6311 new_node = &new_entry->efd_node;
6312 cluster = new_entry->efd_start_cluster;
6318 * on-disk bitmap and lose not-yet-available
6320 get_page(e4b->bd_buddy_page);
6321 get_page(e4b->bd_bitmap_page);
6326 if (cluster < entry->efd_start_cluster)
6327 n = &(*n)->rb_left;
6328 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
6329 n = &(*n)->rb_right;
6334 "Block already on to-be-freed list");
6341 rb_insert_color(new_node, &db->bb_free_root);
6348 &(db->bb_free_root));
6355 &(db->bb_free_root));
6358 spin_lock(&sbi->s_md_lock);
6359 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]);
6360 sbi->s_mb_free_pending += clusters;
6361 spin_unlock(&sbi->s_md_lock);
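
/*
 * Summarizing the code above (no new behavior): freed extents are
 * parked per group in bb_free_root, merged with neighbours freed in
 * the same transaction, and only returned to the buddy once that
 * transaction commits; the page references taken above keep the bitmap
 * and buddy pages alive until then, so blocks whose freeing has not
 * yet reached disk are never handed out again prematurely.
 */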
6367 struct super_block *sb = inode->i_sb; in ext4_free_blocks_simple()
6379 * ext4_mb_clear_bb() -- helper function for freeing blocks.
6391 struct super_block *sb = inode->i_sb; in ext4_mb_clear_bb()
6407 ext4_error(sb, "Freeing blocks in system zone - " in ext4_mb_clear_bb()
6427 overflow = EXT4_C2B(sbi, bit) + count - in ext4_mb_clear_bb()
6429 count -= overflow; in ext4_mb_clear_bb()
6444 ext4_error(sb, "Freeing blocks in system zone - " in ext4_mb_clear_bb()
6480 new_entry->efd_start_cluster = bit; in ext4_mb_clear_bb()
6481 new_entry->efd_group = block_group; in ext4_mb_clear_bb()
6482 new_entry->efd_count = count_clusters; in ext4_mb_clear_bb()
6483 new_entry->efd_tid = handle->h_transaction->t_tid; in ext4_mb_clear_bb()
6491 if (err && err != -EOPNOTSUPP) in ext4_mb_clear_bb()
6513 percpu_counter_add(&sbi->s_freeclusters_counter, in ext4_mb_clear_bb()
/*
 * ext4_free_blocks() -- Free given blocks and update quota.
 */
	struct super_block *sb = inode->i_sb;
	/* ... */
	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
		ext4_free_blocks_simple(inode, block,
					EXT4_NUM_B2C(sbi, count));
		return;
	}
	/* ... */
	ext4_error(sb, "Freeing blocks not in datazone - "
		   "block = %llu, count = %lu", block, count);
	/* ... */
	/*
	 * On bigalloc file systems, a range that does not begin or end on
	 * a cluster boundary is adjusted so only whole clusters are freed.
	 * If the partial first cluster must be kept, skip past it:
	 */
	overflow = sbi->s_cluster_ratio - overflow;
	block += overflow;
	count -= overflow;
	/* ... otherwise grow the range down to the cluster start: */
	block -= overflow;
	count += overflow;
	/* ... a partial tail cluster is either trimmed off ... */
	count -= overflow;
	/* ... or rounded up to a whole cluster: */
	count += sbi->s_cluster_ratio - overflow;
	/* ... */
	/* forget buffers of freed metadata so they are not written back */
	bh = sb_find_get_block(inode->i_sb, block + i);
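/*
 * Userspace sketch of the bigalloc rounding above, assuming a cluster
 * ratio of 16 blocks per cluster and that partial clusters at either
 * end may be freed (no NOFREE_* style restriction). Names are
 * illustrative.
 */
#include <stdio.h>

#define CLUSTER_RATIO 16UL

int main(void)
{
	unsigned long block = 100;	/* first block to free */
	unsigned long count = 40;	/* number of blocks */
	unsigned long off;

	off = block % CLUSTER_RATIO;	/* misalignment at the start */
	if (off) {
		block -= off;		/* grow down to the cluster start */
		count += off;
	}
	off = count % CLUSTER_RATIO;	/* misalignment at the tail */
	if (off)
		count += CLUSTER_RATIO - off;	/* grow to the cluster end */

	/* prints: free blocks 96..143 (3 clusters) */
	printf("free blocks %lu..%lu (%lu clusters)\n",
	       block, block + count - 1, count / CLUSTER_RATIO);
	return 0;
}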
/*
 * ext4_group_add_blocks() -- Add given blocks to an existing group.
 * Marks the blocks free in the bitmap and buddy.
 */
	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
	unsigned long cluster_count = last_cluster - first_cluster + 1;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
	/* ... the whole range must stay within one block group: */
	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u",
			     block_group);
		err = -EINVAL;
		goto error_return;
	}
	/* ... */
	ext4_error(sb, "Adding blocks in system zones - "
		   "Block = %llu, count = %lu", block, count);
	err = -EINVAL;
	/* ... */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, count));
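/*
 * Sketch of the cluster-span arithmetic above with a cluster ratio of
 * 16: the number of clusters touched by a block range depends on its
 * alignment, not just on its length. Names are illustrative.
 */
#include <stdio.h>

#define B2C(b) ((b) / 16)	/* block number -> cluster number */

int main(void)
{
	unsigned long long block = 30, count = 20;
	unsigned long long first = B2C(block);
	unsigned long long last = B2C(block + count - 1);

	/* 20 blocks starting at 30 span clusters 1..3, i.e. 3 clusters */
	printf("clusters %llu-%llu (%llu)\n", first, last,
	       last - first + 1);
	return 0;
}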
/*
 * ext4_trim_extent -- function to TRIM one single free extent in the
 * group. The group lock must be held so that no one can allocate from
 * the range while it is being discarded.
 */
	ext4_group_t group = e4b->bd_group;
/* in ext4_last_grp_cluster(): the last group may be shorter than a
 * full group, so its last cluster is computed from the fs size. */
	if (grp < (ext4_get_groups_count(sb) - 1))
		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
	else
		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
					ext4_group_first_block_no(sb, grp))
				       >> EXT4_CLUSTER_BITS(sb);
	return nr_clusters_in_group - 1;
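/*
 * Sketch of the last-group computation above: with 32768 clusters per
 * group, a file system of 100000 clusters has a final group holding
 * only the remainder. All values are illustrative.
 */
#include <stdio.h>

#define CLUSTERS_PER_GROUP 32768UL

int main(void)
{
	unsigned long total = 100000;	/* clusters in the file system */
	unsigned long groups = (total + CLUSTERS_PER_GROUP - 1) /
			       CLUSTERS_PER_GROUP;
	unsigned long last_start = (groups - 1) * CLUSTERS_PER_GROUP;
	unsigned long in_last = total - last_start;

	/* prints: 4 groups, last group has 1696 clusters (last index 1695) */
	printf("%lu groups, last group has %lu clusters (last index %lu)\n",
	       groups, in_last, in_last - 1);
	return 0;
}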
/*
 * in ext4_try_to_trim_range(): scan the group bitmap and discard every
 * run of free clusters that is at least @minblocks long. Called and
 * returns with the group lock held, but may drop it to reschedule.
 */
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
	/* ... */
	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
		return 0;

	last = ext4_last_grp_cluster(sb, e4b->bd_group);
	bitmap = e4b->bd_bitmap;
	start = max(e4b->bd_info->bb_first_free, start);
	/* ... for each free run [start, next) found in the bitmap: */
	if ((next - start) >= minblocks) {
		int ret = ext4_trim_extent(sb, start, next - start, e4b);

		if (ret && ret != -EOPNOTSUPP)
			break;
		count += next - start;
	}
	free_count += next - start;
	/* ... drop the group lock periodically to stay preemptible: */
	ext4_unlock_group(sb, e4b->bd_group);
	cond_resched();
	ext4_lock_group(sb, e4b->bd_group);
	/* ... stop early once the remaining free space cannot yield
	 * another extent of at least minblocks: */
	if ((e4b->bd_info->bb_free - free_count) < minblocks)
		break;
	/* ... */
	EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
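/*
 * Userspace sketch of the scan above: walk a bitmap where 0 = free and
 * 1 = in use, and report every free run of at least min_len bits. The
 * helper name (find_next) is illustrative, not the kernel's bitmap API.
 */
#include <stdio.h>

static int bits[16] = { 1,0,0,0,0,1,1,0,1,0,0,0,0,0,1,1 };

static int find_next(int from, int total, int val)
{
	while (from < total && bits[from] != val)
		from++;
	return from;
}

int main(void)
{
	int total = 16, min_len = 3, start = 0;

	while (start < total) {
		int run_start = find_next(start, total, 0); /* next free */
		int run_end = find_next(run_start, total, 1); /* run end */

		if (run_start >= total)
			break;
		if (run_end - run_start >= min_len)
			printf("trim [%d, %d)\n", run_start, run_end);
		start = run_end + 1;
	}
	return 0;	/* prints: trim [1, 5) and trim [9, 14) */
}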
/*
 * ext4_trim_all_free -- function to trim all free space in alloc. group.
 * A group is rescanned only if it was not trimmed before, or if the
 * previous trim used a larger minimum extent length:
 */
	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
/*
 * ext4_trim_fs() -- trim ioctl handle function. Walks all allocation
 * groups containing bytes in the requested range and trims the free
 * extents of at least minlen bytes in each group.
 */
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	/* ... */
	/* convert the byte-based fstrim_range to blocks and clusters */
	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* no point trying to trim less than the discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				discard_granularity >> sb->s_blocksize_bits);
		/* ... */
	}
	if (end >= max_blks - 1)
		end = max_blks - 1;
	/* ... */
	/* end now means the last cluster to discard within a group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	/* ... for each group in the range: */
	/*
	 * For all the groups except the last one, the last cluster will
	 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
	 * change it for the last group.
	 */
	if (grp->bb_free >= minlen) {
		cnt = ext4_trim_all_free(sb, group, first_cluster,
					 end, minlen);
		/* ... */
	}
	/* ... */
	EXT4_SB(sb)->s_last_trim_minblks = minlen;

	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
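/*
 * Userspace view of the ioctl handled above: FITRIM and struct
 * fstrim_range are the standard Linux interface from <linux/fs.h>;
 * "/mnt" is only an example mount point.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&range, 0, sizeof(range));
	range.start = 0;	/* bytes */
	range.len = (__u64)-1;	/* whole file system */
	range.minlen = 0;	/* kernel raises this to the granularity */

	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");
	else	/* on return, len holds the number of bytes trimmed */
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);
	close(fd);
	return 0;
}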
/*
 * in ext4_mballoc_query_range(): report each free extent in the group
 * to the caller-supplied formatter callback.
 */
	start = max(e4b.bd_info->bb_first_free, start);
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	/* ... for each free run [start, next) in the bitmap: */
	error = formatter(sb, group, start, next - start, priv);
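/*
 * Userspace sketch of the callback pattern above: a walker finds free
 * runs and hands each (start, len) pair to a formatter along with an
 * opaque priv pointer. All names here are illustrative, not the
 * kernel's.
 */
#include <stdio.h>

typedef int (*range_fn)(unsigned int start, unsigned int len, void *priv);

static int bits[12] = { 1,0,0,1,0,0,0,1,1,0,1,1 };	/* 0 = free */

static int query_range(range_fn formatter, void *priv)
{
	unsigned int start = 0, next, total = 12;
	int error;

	while (start < total) {
		while (start < total && bits[start])	/* skip used */
			start++;
		next = start;
		while (next < total && !bits[next])	/* span free run */
			next++;
		if (start >= total)
			break;
		error = formatter(start, next - start, priv);
		if (error)
			return error;
		start = next + 1;
	}
	return 0;
}

static int print_extent(unsigned int start, unsigned int len, void *priv)
{
	*(unsigned int *)priv += len;	/* accumulate total free */
	printf("free extent: start=%u len=%u\n", start, len);
	return 0;
}

int main(void)
{
	unsigned int total_free = 0;

	query_range(print_extent, &total_free);
	printf("total free: %u\n", total_free);	/* prints 6 */
	return 0;
}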
#include "mballoc-test.c"