Lines Matching refs:pa
372 * - per-pa lock (pa)
377 * - new pa
381 * - find and use pa:
382 * pa
384 * - release consumed pa:
385 * pa
391 * pa
395 * pa
400 * pa
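
The comment block at mballoc.c lines 372-400 (heavily truncated in this listing) outlines the per-pa locking rules: pa_free, pa_deleted and the reference count are only touched under the per-pa spinlock. Below is a minimal userspace sketch of the "find and use pa" step, assuming hypothetical names (struct prealloc, try_use_prealloc); it illustrates the take-lock, skip-deleted, consume, hold-a-reference pattern and is not the kernel code itself.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-in for struct ext4_prealloc_space. */
struct prealloc {
	pthread_mutex_t lock;    /* models pa->pa_lock            */
	atomic_int      count;   /* models pa->pa_count           */
	int             free;    /* models pa->pa_free (clusters) */
	int             deleted; /* models pa->pa_deleted         */
};

/*
 * "Find and use pa": under the per-pa lock, skip deleted or exhausted
 * preallocations; otherwise take a reference and consume 'want' blocks
 * (or as many as are left).  Returns the number of blocks consumed.
 */
static int try_use_prealloc(struct prealloc *pa, int want)
{
	int got = 0;

	pthread_mutex_lock(&pa->lock);
	if (!pa->deleted && pa->free > 0) {
		got = want < pa->free ? want : pa->free;
		pa->free -= got;
		atomic_fetch_add(&pa->count, 1); /* caller now holds a ref */
	}
	pthread_mutex_unlock(&pa->lock);
	return got;
}

int main(void)
{
	struct prealloc pa = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 1, .free = 8, .deleted = 0,
	};
	int got = try_use_prealloc(&pa, 5);

	printf("got %d blocks, %d left in the pa\n", got, pa.free);
	return 0;
}
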
769 struct ext4_prealloc_space *pa;
770 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
771 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
773 for (i = 0; i < pa->pa_len; i++)
3829 struct ext4_prealloc_space *pa;
3834 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3835 list_del(&pa->pa_group_list);
3837 kmem_cache_free(ext4_pspace_cachep, pa);
4613 * In case of inode pa, later we use the allocated blocks
4701 struct ext4_prealloc_space *pa = ac->ac_pa;
4705 if (pa == NULL) {
4724 if (pa->pa_type == MB_INODE_PA) {
4725 spin_lock(&pa->pa_lock);
4726 pa->pa_free += ac->ac_b_ex.fe_len;
4727 spin_unlock(&pa->pa_lock);
4735 struct ext4_prealloc_space *pa)
4743 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4744 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4751 ac->ac_pa = pa;
4753 BUG_ON(start < pa->pa_pstart);
4754 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4755 BUG_ON(pa->pa_free < len);
4757 pa->pa_free -= len;
4759 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
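
Lines 4743-4759 translate the original request into a physical range inside the inode pa: the physical start is pa_pstart offset by how far the requested logical block lies past pa_lstart, clipped to both the end of the pa and the end of the request. A hedged arithmetic sketch with plain integers and hypothetical variable names (the EXT4_C2B cluster-to-block conversion is folded into a factor of one for simplicity):

#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical inode pa: physical 1000, logical 200, 64 blocks long. */
	unsigned long long pa_pstart = 1000, pa_lstart = 200, pa_len = 64;
	/* Original request: logical block 210, 16 blocks wanted. */
	unsigned long long o_logical = 210, o_len = 16;

	unsigned long long start = pa_pstart + (o_logical - pa_lstart);
	unsigned long long end   = min_u64(pa_pstart + pa_len, start + o_len);
	unsigned long long len   = end - start;

	/* start = 1010, end = 1026, len = 16: the request fits inside the pa. */
	printf("use %llu..%llu (%llu blocks) from the pa\n", start, end, len);
	return 0;
}
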
4766 struct ext4_prealloc_space *pa)
4770 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4775 ac->ac_pa = pa;
4779 * instead we correct pa later, after blocks are marked
4781 * Other CPUs are prevented from allocating from this pa by lg_mutex
4783 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4784 pa->pa_lstart, len, pa);
4795 struct ext4_prealloc_space *pa,
4801 atomic_inc(&pa->pa_count);
4802 return pa;
4805 new_distance = abs(goal_block - pa->pa_pstart);
4812 atomic_inc(&pa->pa_count);
4813 return pa;
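
The fragment at lines 4795-4813 keeps whichever candidate group pa is physically closer to the goal block, taking a reference on the winner. A minimal userspace sketch of that comparison, assuming hypothetical names (struct pa_cand, pick_closer); the reference-drop on the losing candidate follows the kernel's behaviour but is modelled with a plain counter:

#include <stdio.h>
#include <stdlib.h>

struct pa_cand {
	long long pstart; /* models pa->pa_pstart */
	int       refs;   /* models pa->pa_count  */
};

/* Return the candidate closest to goal_block, holding a reference on it. */
static struct pa_cand *pick_closer(long long goal_block,
				   struct pa_cand *cur, struct pa_cand *best)
{
	if (!best) {          /* first candidate always wins */
		cur->refs++;
		return cur;
	}
	if (llabs(goal_block - cur->pstart) < llabs(goal_block - best->pstart)) {
		best->refs--; /* drop the reference on the previous best */
		cur->refs++;
		return cur;
	}
	return best;
}

int main(void)
{
	struct pa_cand a = { .pstart = 5000 }, b = { .pstart = 5200 };
	struct pa_cand *best = pick_closer(5150, &a, NULL);

	best = pick_closer(5150, &b, best);
	printf("closest pa starts at %lld\n", best->pstart); /* 5200 */
	return 0;
}
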
4817 * check if found pa meets EXT4_MB_HINT_GOAL_ONLY
4821 struct ext4_prealloc_space *pa)
4835 start = pa->pa_pstart +
4836 (ac->ac_g_ex.fe_logical - pa->pa_lstart);
4840 if (ac->ac_g_ex.fe_len > pa->pa_len -
4841 EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart))
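
The EXT4_MB_HINT_GOAL_ONLY check around lines 4835-4841 only accepts a pa if mapping the goal logical block through it lands exactly on the goal physical block and the pa still has enough room past that offset. A hedged sketch of the test with hypothetical names and plain integers (the physical-goal comparison, elided by this listing, is modelled as a simple equality):

#include <stdbool.h>
#include <stdio.h>

/*
 * Accept the pa only if mapping the goal logical block through it lands
 * exactly on the goal physical block, and the pa has goal_len blocks
 * available past that point.
 */
static bool goal_only_ok(unsigned long long pa_pstart,
			 unsigned long long pa_lstart, unsigned long long pa_len,
			 unsigned long long goal_logical,
			 unsigned long long goal_pblock, unsigned long long goal_len)
{
	unsigned long long start = pa_pstart + (goal_logical - pa_lstart);

	if (start != goal_pblock)
		return false;
	if (goal_len > pa_len - (goal_logical - pa_lstart))
		return false;
	return true;
}

int main(void)
{
	/* pa: physical 1000, logical 200, 64 blocks; goal: logical 210 ->
	 * physical 1010, 16 blocks requested. */
	printf("%s\n", goal_only_ok(1000, 200, 64, 210, 1010, 16) ?
	       "usable" : "skip");
	return 0;
}
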
4866 * first, try per-file preallocation by searching the inode pa rbtree.
4869 * ext4_mb_discard_group_preallocation() can concurrently mark the pa
4879 * Step 1: Find a pa with logical start immediately adjacent to the
4892 * Step 2: The adjacent pa might be to the right of logical start, find
4893 * the left adjacent pa. After this step we'd have a valid tmp_pa whose
4905 * If there is no adjacent pa to the left then finding
4906 * an overlapping pa is not possible, hence stop searching
4907 * the inode pa tree
4916 * Step 3: If the left adjacent pa is deleted, keep moving left to find
4917 * the first non-deleted adjacent pa. After this step we should have a
4923 * no non-deleted left adjacent pa, so stop searching
4924 * the inode pa tree
4935 * to delete this pa underneath us. Since group
4949 * Step 4: We now have the non-deleted left adjacent pa. Only this
4950 * pa can possibly satisfy the request hence check if it overlaps
4978 * We found a valid overlapping pa but couldn't use it because
4982 * 1. When a new inode pa is added to rbtree it must have
4986 * 2. An inode pa that is in the rbtree can only have its
5001 * sure that another process will never see a pa in rbtree with
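
Steps 1-4 above describe the inode pa rbtree search: find the pa whose pa_lstart is nearest the requested logical block, step left if that neighbour starts to the right of the request, keep stepping left past deleted entries until a non-deleted candidate (or nothing) remains, then test that single candidate for overlap. A simplified, runnable model of the same walk over a sorted array instead of an rbtree (hypothetical names; deletion is just a flag, as in the kernel):

#include <stdbool.h>
#include <stdio.h>

struct ipa {
	unsigned long long lstart, len; /* logical start and length */
	bool deleted;
};

/*
 * Return the right-most non-deleted pa with lstart <= logical, or NULL.
 * This models steps 1-3 of the rbtree walk; the array is sorted by lstart.
 */
static struct ipa *find_left_adjacent(struct ipa *v, int n,
				      unsigned long long logical)
{
	int i;

	for (i = n - 1; i >= 0; i--) {    /* step left ...                     */
		if (v[i].lstart > logical) /* ... past entries to the right     */
			continue;
		if (v[i].deleted)          /* ... and past deleted entries      */
			continue;
		return &v[i];
	}
	return NULL;
}

int main(void)
{
	struct ipa v[] = {
		{ .lstart = 100, .len = 32 },
		{ .lstart = 200, .len = 16, .deleted = true },
		{ .lstart = 300, .len = 8  },
	};
	unsigned long long logical = 110;
	struct ipa *pa = find_left_adjacent(v, 3, logical);

	/* Step 4: only this candidate can possibly overlap the request. */
	if (pa && logical < pa->lstart + pa->len)
		printf("overlapping pa at %llu\n", pa->lstart);
	else
		printf("no usable pa, allocate fresh blocks\n");
	return 0;
}
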
5060 struct ext4_prealloc_space *pa;
5079 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
5080 spin_lock(&pa->pa_lock);
5081 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5083 len = pa->pa_len;
5084 spin_unlock(&pa->pa_lock);
5095 struct ext4_prealloc_space *pa)
5099 if (pa->pa_deleted) {
5100 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
5101 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
5102 pa->pa_len);
5106 pa->pa_deleted = 1;
5108 if (pa->pa_type == MB_INODE_PA) {
5109 ei = EXT4_I(pa->pa_inode);
5114 static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa)
5116 BUG_ON(!pa);
5117 BUG_ON(atomic_read(&pa->pa_count));
5118 BUG_ON(pa->pa_deleted == 0);
5119 kmem_cache_free(ext4_pspace_cachep, pa);
5124 struct ext4_prealloc_space *pa;
5126 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
5127 ext4_mb_pa_free(pa);
5135 struct super_block *sb, struct ext4_prealloc_space *pa)
5142 spin_lock(&pa->pa_lock);
5143 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
5144 spin_unlock(&pa->pa_lock);
5148 if (pa->pa_deleted == 1) {
5149 spin_unlock(&pa->pa_lock);
5153 ext4_mb_mark_pa_deleted(sb, pa);
5154 spin_unlock(&pa->pa_lock);
5156 grp_blk = pa->pa_pstart;
5159 * next group when pa is used up
5161 if (pa->pa_type == MB_GROUP_PA)
5181 list_del(&pa->pa_group_list);
5184 if (pa->pa_type == MB_INODE_PA) {
5185 write_lock(pa->pa_node_lock.inode_lock);
5186 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5187 write_unlock(pa->pa_node_lock.inode_lock);
5188 ext4_mb_pa_free(pa);
5190 spin_lock(pa->pa_node_lock.lg_lock);
5191 list_del_rcu(&pa->pa_node.lg_list);
5192 spin_unlock(pa->pa_node_lock.lg_lock);
5193 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
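
ext4_mb_put_pa (lines 5135-5193) drops the caller's reference and, when that was the last reference and the pa is fully consumed, marks it deleted under pa_lock, unlinks it from the group and owner structures, and frees it (directly for an inode pa, via RCU for a group pa). A stripped-down userspace model of the drop-last-reference-and-retire logic, with hypothetical names and the list/RCU handling omitted:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct prealloc {
	pthread_mutex_t lock;
	atomic_int      count;
	int             free;
	bool            deleted;
};

/* Drop a reference; retire the pa only if it is unused and fully consumed. */
static void put_prealloc(struct prealloc *pa)
{
	pthread_mutex_lock(&pa->lock);
	if (atomic_fetch_sub(&pa->count, 1) != 1 || pa->free != 0 || pa->deleted) {
		/* still referenced, still has blocks, or already being retired */
		pthread_mutex_unlock(&pa->lock);
		return;
	}
	pa->deleted = true; /* nobody may pick this pa up any more */
	pthread_mutex_unlock(&pa->lock);

	/* In the kernel: unlink from group/inode/lg lists, then free (via RCU
	 * for a group pa).  Here we just free the model object. */
	pthread_mutex_destroy(&pa->lock);
	free(pa);
}

int main(void)
{
	struct prealloc *pa = calloc(1, sizeof(*pa));

	pthread_mutex_init(&pa->lock, NULL);
	atomic_store(&pa->count, 1);
	pa->free = 0;     /* fully consumed */
	put_prealloc(pa); /* last reference: pa is retired and freed */
	puts("pa retired");
	return 0;
}
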
5230 struct ext4_prealloc_space *pa;
5240 pa = ac->ac_pa;
5290 pa->pa_lstart = ac->ac_b_ex.fe_logical;
5291 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5292 pa->pa_len = ac->ac_b_ex.fe_len;
5293 pa->pa_free = pa->pa_len;
5294 spin_lock_init(&pa->pa_lock);
5295 INIT_LIST_HEAD(&pa->pa_group_list);
5296 pa->pa_deleted = 0;
5297 pa->pa_type = MB_INODE_PA;
5299 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5300 pa->pa_len, pa->pa_lstart);
5301 trace_ext4_mb_new_inode_pa(ac, pa);
5303 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
5304 ext4_mb_use_inode_pa(ac, pa);
5311 pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
5312 pa->pa_inode = ac->ac_inode;
5314 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5316 write_lock(pa->pa_node_lock.inode_lock);
5317 ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node);
5318 write_unlock(pa->pa_node_lock.inode_lock);
5330 struct ext4_prealloc_space *pa;
5339 pa = ac->ac_pa;
5341 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5342 pa->pa_lstart = pa->pa_pstart;
5343 pa->pa_len = ac->ac_b_ex.fe_len;
5344 pa->pa_free = pa->pa_len;
5345 spin_lock_init(&pa->pa_lock);
5346 INIT_LIST_HEAD(&pa->pa_node.lg_list);
5347 INIT_LIST_HEAD(&pa->pa_group_list);
5348 pa->pa_deleted = 0;
5349 pa->pa_type = MB_GROUP_PA;
5351 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
5352 pa->pa_len, pa->pa_lstart);
5353 trace_ext4_mb_new_group_pa(ac, pa);
5355 ext4_mb_use_group_pa(ac, pa);
5356 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
5364 pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock;
5365 pa->pa_inode = NULL;
5367 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
5370 * We will later add the new pa to the right bucket
5386 * @pa must be unlinked from inode and group lists, so that
5393 struct ext4_prealloc_space *pa)
5404 BUG_ON(pa->pa_deleted == 0);
5405 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5406 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
5407 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
5408 end = bit + pa->pa_len;
5421 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
5424 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
5427 if (free != pa->pa_free) {
5429 "pa %p: logic %lu, phys. %lu, len %d",
5430 pa, (unsigned long) pa->pa_lstart,
5431 (unsigned long) pa->pa_pstart,
5432 pa->pa_len);
5434 free, pa->pa_free);
5436 * pa is already deleted so we use the value obtained
5445 struct ext4_prealloc_space *pa)
5451 trace_ext4_mb_release_group_pa(sb, pa);
5452 BUG_ON(pa->pa_deleted == 0);
5453 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
5454 if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
5456 e4b->bd_group, group, pa->pa_pstart);
5459 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
5460 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
5461 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
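
ext4_mb_release_inode_pa (lines 5393-5436) walks the block bitmap between the pa's first and last cluster, returns each run of still-free bits, and compares the total with pa_free; the bitmap helpers themselves do not match "pa" and are not shown. A small runnable model of that walk over a toy byte-per-bit bitmap, with hypothetical names:

#include <stdio.h>

/*
 * Walk bits [bit, end) of a toy bitmap (0 = free, 1 = in use), counting and
 * "releasing" each run of free bits, the way the group bitmap is walked
 * across an inode pa's range.
 */
static int release_free_runs(const char *bitmap, int bit, int end)
{
	int freed = 0;

	while (bit < end) {
		int next;

		while (bit < end && bitmap[bit] != 0)   /* find next free bit   */
			bit++;
		next = bit;
		while (next < end && bitmap[next] == 0) /* find end of the run  */
			next++;
		if (next > bit) {
			printf("free run: bits %d..%d (%d blocks)\n",
			       bit, next - 1, next - bit);
			freed += next - bit;
		}
		bit = next;
	}
	return freed;
}

int main(void)
{
	/* pa covers bits 2..13; bits marked 1 were allocated from the pa. */
	const char bitmap[16] = { 0,0, 0,0,1,1, 0,0,0,1, 0,0,0,0, 0,0 };
	int freed = release_free_runs(bitmap, 2, 14);

	/* If 'freed' differed from pa_free, the kernel would warn and trust
	 * the bitmap count, since the pa is already deleted at this point. */
	printf("returned %d unused blocks to the allocator\n", freed);
	return 0;
}
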
5479 struct ext4_prealloc_space *pa, *tmp;
5510 list_for_each_entry_safe(pa, tmp,
5512 spin_lock(&pa->pa_lock);
5513 if (atomic_read(&pa->pa_count)) {
5514 spin_unlock(&pa->pa_lock);
5518 if (pa->pa_deleted) {
5519 spin_unlock(&pa->pa_lock);
5524 ext4_mb_mark_pa_deleted(sb, pa);
5530 free += pa->pa_free;
5532 spin_unlock(&pa->pa_lock);
5534 list_del(&pa->pa_group_list);
5535 list_add(&pa->u.pa_tmp_list, &list);
5539 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5542 if (pa->pa_type == MB_GROUP_PA) {
5543 spin_lock(pa->pa_node_lock.lg_lock);
5544 list_del_rcu(&pa->pa_node.lg_list);
5545 spin_unlock(pa->pa_node_lock.lg_lock);
5547 write_lock(pa->pa_node_lock.inode_lock);
5548 ei = EXT4_I(pa->pa_inode);
5549 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5550 write_unlock(pa->pa_node_lock.inode_lock);
5553 list_del(&pa->u.pa_tmp_list);
5555 if (pa->pa_type == MB_GROUP_PA) {
5556 ext4_mb_release_group_pa(&e4b, pa);
5557 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5559 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5560 ext4_mb_pa_free(pa);
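
ext4_mb_discard_group_preallocations (lines 5479-5560) works in two phases: under the group's lock it marks idle victims deleted and moves them onto a private list, then, with the lock dropped, it releases each victim's blocks and frees it. The collect-under-lock, free-outside-lock shape is easy to model in userspace (hypothetical names; the RCU step for group pas is omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct victim {
	int            id;
	int            busy; /* models atomic_read(&pa->pa_count) */
	struct victim *next;
};

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static struct victim  *group_list;

/* Phase 1: under the lock, unhook idle entries onto a private list.
 * Phase 2: after dropping the lock, do the expensive release/free work. */
static int discard_idle(void)
{
	struct victim *keep = NULL, *doomed = NULL, *v, *next;
	int freed = 0;

	pthread_mutex_lock(&group_lock);
	for (v = group_list; v; v = next) {
		next = v->next;
		if (v->busy) {          /* skip entries still in use */
			v->next = keep;
			keep = v;
		} else {
			v->next = doomed;
			doomed = v;
		}
	}
	group_list = keep;
	pthread_mutex_unlock(&group_lock);

	for (v = doomed; v; v = next) { /* lock no longer held */
		next = v->next;
		printf("releasing pa %d\n", v->id);
		free(v);
		freed++;
	}
	return freed;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct victim *v = calloc(1, sizeof(*v));

		v->id = i;
		v->busy = (i == 1); /* pa 1 is still referenced */
		v->next = group_list;
		group_list = v;
	}
	printf("discarded %d preallocations\n", discard_idle());
	return 0;
}
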
5587 struct ext4_prealloc_space *pa, *tmp;
5606 /* first, collect all pa's in the inode */
5610 pa = rb_entry(iter, struct ext4_prealloc_space,
5612 BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock);
5614 spin_lock(&pa->pa_lock);
5615 if (atomic_read(&pa->pa_count)) {
5618 spin_unlock(&pa->pa_lock);
5621 "uh-oh! used pa while discarding");
5627 if (pa->pa_deleted == 0) {
5628 ext4_mb_mark_pa_deleted(sb, pa);
5629 spin_unlock(&pa->pa_lock);
5630 rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node);
5631 list_add(&pa->u.pa_tmp_list, &list);
5635 /* someone is deleting pa right now */
5636 spin_unlock(&pa->pa_lock);
5640 * doesn't mean pa is already unlinked from
5644 * pa from inode's list may access already
5656 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5657 BUG_ON(pa->pa_type != MB_INODE_PA);
5658 group = ext4_get_group_number(sb, pa->pa_pstart);
5678 list_del(&pa->pa_group_list);
5679 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5685 list_del(&pa->u.pa_tmp_list);
5686 ext4_mb_pa_free(pa);
5692 struct ext4_prealloc_space *pa;
5695 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5696 if (!pa)
5698 atomic_set(&pa->pa_count, 1);
5699 ac->ac_pa = pa;
5705 struct ext4_prealloc_space *pa = ac->ac_pa;
5707 BUG_ON(!pa);
5709 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5715 pa->pa_deleted = 1;
5716 ext4_mb_pa_free(pa);
5731 struct ext4_prealloc_space *pa;
5739 pa = list_entry(cur, struct ext4_prealloc_space,
5741 spin_lock(&pa->pa_lock);
5742 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5744 spin_unlock(&pa->pa_lock);
5746 pa->pa_len);
5782 mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa));
5785 "group pa" : "inode pa");
5917 struct ext4_prealloc_space *pa, *tmp;
5922 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5925 spin_lock(&pa->pa_lock);
5926 if (atomic_read(&pa->pa_count)) {
5928 * This is the pa that we just used
5932 spin_unlock(&pa->pa_lock);
5935 if (pa->pa_deleted) {
5936 spin_unlock(&pa->pa_lock);
5940 BUG_ON(pa->pa_type != MB_GROUP_PA);
5943 ext4_mb_mark_pa_deleted(sb, pa);
5944 spin_unlock(&pa->pa_lock);
5946 list_del_rcu(&pa->pa_node.lg_list);
5947 list_add(&pa->u.pa_tmp_list, &discard_list);
5962 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5965 group = ext4_get_group_number(sb, pa->pa_pstart);
5974 list_del(&pa->pa_group_list);
5975 ext4_mb_release_group_pa(&e4b, pa);
5979 list_del(&pa->u.pa_tmp_list);
5980 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5998 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
6000 order = fls(pa->pa_free) - 1;
6014 if (!added && pa->pa_free < tmp_pa->pa_free) {
6016 list_add_tail_rcu(&pa->pa_node.lg_list,
6028 list_add_tail_rcu(&pa->pa_node.lg_list,
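
ext4_mb_add_n_trim (lines 5998-6028 in this listing) files a group pa into one of the locality group's order buckets, chosen by fls(pa_free), and inserts it in front of the first existing entry with a larger pa_free, keeping each bucket ordered by pa_free. A small model of the bucket choice and the ordered insert, with hypothetical names and a plain singly linked list standing in for the RCU list:

#include <stdio.h>
#include <stdlib.h>

struct gpa {
	int         pa_free;
	struct gpa *next;
};

/* fls(): index of the highest set bit, 1-based, like the kernel helper. */
static int fls_int(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Insert in front of the first entry with a larger pa_free, so the
 * bucket list stays ordered by pa_free. */
static void bucket_insert(struct gpa **bucket, struct gpa *pa)
{
	while (*bucket && (*bucket)->pa_free <= pa->pa_free)
		bucket = &(*bucket)->next;
	pa->next = *bucket;
	*bucket = pa;
}

int main(void)
{
	struct gpa *buckets[10] = { NULL }; /* models lg_prealloc_list[] */
	int sizes[] = { 12, 3, 9, 14 };

	for (int i = 0; i < 4; i++) {
		struct gpa *pa = calloc(1, sizeof(*pa));
		int order = fls_int(sizes[i]) - 1;

		pa->pa_free = sizes[i];
		bucket_insert(&buckets[order], pa);
	}

	/* 12, 9 and 14 share order fls(x)-1 == 3; 3 lands in order 1. */
	for (struct gpa *p = buckets[3]; p; p = p->next)
		printf("order-3 bucket: pa_free=%d\n", p->pa_free);
	return 0;
}
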
6044 struct ext4_prealloc_space *pa = ac->ac_pa;
6045 if (pa) {
6046 if (pa->pa_type == MB_GROUP_PA) {
6048 spin_lock(&pa->pa_lock);
6049 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6050 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
6051 pa->pa_free -= ac->ac_b_ex.fe_len;
6052 pa->pa_len -= ac->ac_b_ex.fe_len;
6053 spin_unlock(&pa->pa_lock);
6056 * We want to add the pa to the right bucket.
6061 if (likely(pa->pa_free)) {
6062 spin_lock(pa->pa_node_lock.lg_lock);
6063 list_del_rcu(&pa->pa_node.lg_list);
6064 spin_unlock(pa->pa_node_lock.lg_lock);
6069 ext4_mb_put_pa(ac, ac->ac_sb, pa);
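
The group-pa accounting deferred at lines 4779-4784 is settled here (lines 6046-6053): once the allocated extent is marked in the bitmap, the pa's window is advanced past it (pa_pstart and pa_lstart move forward, pa_len and pa_free shrink) and, if anything is left, the pa is re-filed into the right lg bucket. A tiny sketch of the window advance with hypothetical names (clusters and blocks treated as the same unit for simplicity):

#include <stdio.h>

struct group_pa {
	unsigned long long pstart, lstart; /* both advance together */
	int                len, free;      /* both shrink by 'used' */
};

/* Consume 'used' blocks from the front of a group pa's window. */
static void advance_group_pa(struct group_pa *pa, int used)
{
	pa->pstart += used;
	pa->lstart += used;
	pa->free   -= used;
	pa->len    -= used;
}

int main(void)
{
	struct group_pa pa = {
		.pstart = 4096, .lstart = 4096, .len = 64, .free = 64,
	};

	advance_group_pa(&pa, 16); /* the extent just allocated */
	printf("pa now starts at %llu with %d free blocks\n", pa.pstart, pa.free);
	/* If pa.free were 0, the pa would not be re-added to the lg bucket. */
	return 0;
}
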
6287 * pa allocated above is added to grp->bb_prealloc_list only
6291 * So we have to free this pa here itself.
6316 * If block allocation fails then the pa allocated above