Lines matching defs:bh (fs/ext2/xattr.c)

53  * by the buffer lock. No more than a single bh lock is held at any time
69 #define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
71 #define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
81 # define ea_bdebug(bh, f...) do { \
83 bh->b_bdev, (unsigned long) bh->b_blocknr); \
89 # define ea_bdebug(bh, f...) no_printk(f)
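
The two macros above are the whole mapping between a buffer_head and the on-disk EA layout: HDR() casts bh->b_data to the block header, and FIRST_ENTRY() is simply HDR(bh)+1, so entries begin immediately after the fixed-size header. A minimal sketch of that overlay (ea_layout_sketch is a hypothetical helper, assuming the structure definitions from fs/ext2/xattr.h):

        /* Hypothetical: show what HDR()/FIRST_ENTRY() resolve to. */
        static bool ea_layout_sketch(struct buffer_head *bh)
        {
                struct ext2_xattr_header *hdr = HDR(bh);
                char *end = bh->b_data + bh->b_size;    /* walk bound, as at line 226 */

                /* FIRST_ENTRY(bh) == (struct ext2_xattr_entry *)(hdr + 1) */
                return (char *)FIRST_ENTRY(bh) == (char *)hdr + sizeof(*hdr) &&
                       (char *)FIRST_ENTRY(bh) < end;
        }
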
199 struct buffer_head *bh = NULL;
220 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
222 if (!bh)
224 ea_bdebug(bh, "b_count=%d, refcount=%d",
225 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
226 end = bh->b_data + bh->b_size;
227 if (!ext2_xattr_header_valid(HDR(bh))) {
237 entry = FIRST_ENTRY(bh);
252 if (ext2_xattr_cache_insert(ea_block_cache, bh))
258 if (ext2_xattr_cache_insert(ea_block_cache, bh))
265 memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
271 brelse(bh);
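
Lines 199-271 are the read side (ext2_xattr_get): sb_bread() pins the EA block, the header is validated before anything inside the block is trusted, and brelse() balances the read on every exit path. A condensed, hypothetical version of that lifecycle (ea_get_sketch and its elided entry lookup are assumptions; the calls match those shown above):

        static int ea_get_sketch(struct inode *inode)
        {
                struct buffer_head *bh;
                int error = 0;

                bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
                if (!bh)
                        return -EIO;                    /* read failed */
                if (!ext2_xattr_header_valid(HDR(bh))) {
                        error = -EIO;                   /* corrupt EA block */
                        goto cleanup;
                }
                /* ... find the entry and memcpy() the value out, as at line 265 ... */
        cleanup:
                brelse(bh);                             /* balance sb_bread() */
                return error;
        }
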
291 struct buffer_head *bh = NULL;
306 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
308 if (!bh)
310 ea_bdebug(bh, "b_count=%d, refcount=%d",
311 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
312 end = bh->b_data + bh->b_size;
313 if (!ext2_xattr_header_valid(HDR(bh))) {
323 entry = FIRST_ENTRY(bh);
330 if (ext2_xattr_cache_insert(ea_block_cache, bh))
334 for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
360 brelse(bh);
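
The list path (lines 291-360) adds the canonical entry walk: start at FIRST_ENTRY(bh) and stop at the all-zero terminator that IS_LAST_ENTRY() detects, as in the for-loop at line 334. A sketch of that loop, using EXT2_XATTR_NEXT() from fs/ext2/xattr.h to step over each variable-length entry (ea_count_entries is hypothetical):

        static size_t ea_count_entries(struct buffer_head *bh)
        {
                struct ext2_xattr_entry *entry;
                size_t n = 0;

                for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
                     entry = EXT2_XATTR_NEXT(entry))
                        n++;                    /* one attribute per iteration */
                return n;
        }
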
410 struct buffer_head *bh = NULL;
418 * header -- Points either into bh, or to a temporarily
445 bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
447 if (!bh)
449 ea_bdebug(bh, "b_count=%d, refcount=%d",
450 atomic_read(&(bh->b_count)),
451 le32_to_cpu(HDR(bh)->h_refcount));
452 header = HDR(bh);
453 end = bh->b_data + bh->b_size;
467 last = FIRST_ENTRY(bh);
521 lock_buffer(bh);
527 hash, bh->b_blocknr);
529 ea_bdebug(bh, "modifying in-place");
537 unlock_buffer(bh);
538 ea_bdebug(bh, "cloning");
539 header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
545 offset = (char *)here - bh->b_data;
547 offset = (char *)last - bh->b_data;
561 /* Iff we are modifying the block in-place, bh is locked here. */
634 if (bh && header == HDR(bh))
635 unlock_buffer(bh); /* we were modifying in-place. */
636 error = ext2_xattr_set2(inode, bh, NULL);
639 if (bh && header == HDR(bh))
640 unlock_buffer(bh); /* we were modifying in-place. */
641 error = ext2_xattr_set2(inode, bh, header);
645 if (!(bh && header == HDR(bh)))
647 brelse(bh);
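
Lines 410-647 are the write path (ext2_xattr_set), and the in-place-versus-clone split at lines 521-547 is where the locking comment from line 53 pays off: with h_refcount == 1 the inode owns the block exclusively and may edit it under the buffer lock, which stays held until the commit at lines 634-640; a shared block is instead cloned with kmemdup(), and entry pointers such as here/last are rebased as byte offsets onto the copy. A condensed sketch of just that decision (ea_prepare_edit is hypothetical; ENTRY() is the plain cast macro from the same file, and the mbcache removal at line 527 is omitted):

        static struct ext2_xattr_header *
        ea_prepare_edit(struct buffer_head *bh, struct ext2_xattr_entry **phere)
        {
                struct ext2_xattr_header *header;
                size_t offset;

                lock_buffer(bh);
                if (HDR(bh)->h_refcount == cpu_to_le32(1))
                        return HDR(bh);         /* sole owner: edit in place, still locked */

                unlock_buffer(bh);              /* shared: never modify, clone instead */
                header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
                if (!header)
                        return NULL;
                /* Rebase the entry pointer from bh->b_data onto the private copy. */
                offset = (char *)*phere - bh->b_data;
                *phere = ENTRY((char *)header + offset);
                return header;
        }
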
654 struct buffer_head *bh)
659 lock_buffer(bh);
660 if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
661 __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
669 bh->b_blocknr);
675 unlock_buffer(bh);
682 ea_bdebug(bh, "freeing");
683 ext2_free_blocks(inode, bh->b_blocknr, 1);
684 /* We let our caller release bh, so we
686 get_bh(bh);
687 bforget(bh);
688 unlock_buffer(bh);
691 le32_add_cpu(&HDR(bh)->h_refcount, -1);
693 mark_buffer_dirty(bh);
694 unlock_buffer(bh);
695 ea_bdebug(bh, "refcount now=%d",
696 le32_to_cpu(HDR(bh)->h_refcount));
698 sync_dirty_buffer(bh);
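
Lines 654-698 drop one reference on an EA block (ext2_xattr_release_block). When h_refcount is 1 the block goes back to the allocator, and get_bh()+bforget() discards the buffer's dirty state without disturbing the caller's own reference; otherwise le32_add_cpu() decrements the on-disk count and the buffer is written back. A condensed sketch, omitting the mbcache and quota bookkeeping (ea_release_sketch is hypothetical):

        static void ea_release_sketch(struct inode *inode, struct buffer_head *bh)
        {
                lock_buffer(bh);
                if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
                        ext2_free_blocks(inode, bh->b_blocknr, 1);
                        /* get_bh() first so bforget()'s reference drop leaves
                         * the caller's brelse() balanced (see line 684). */
                        get_bh(bh);
                        bforget(bh);
                        unlock_buffer(bh);
                } else {
                        le32_add_cpu(&HDR(bh)->h_refcount, -1);
                        mark_buffer_dirty(bh);
                        unlock_buffer(bh);
                        if (IS_SYNC(inode))
                                sync_dirty_buffer(bh);
                }
        }
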
819 struct buffer_head *bh = NULL;
841 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
842 if (!bh) {
848 ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
849 if (!ext2_xattr_header_valid(HDR(bh))) {
855 ext2_xattr_release_block(inode, bh);
859 brelse(bh);
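
The delete path (lines 819-859) composes the two previous patterns: read the block named by i_file_acl, refuse to touch it if the header fails validation, and otherwise hand it to the release logic before brelse(). Roughly (ea_delete_sketch is hypothetical; clearing i_file_acl afterwards is an assumption about the surrounding code):

        static void ea_delete_sketch(struct inode *inode)
        {
                struct buffer_head *bh;

                if (!EXT2_I(inode)->i_file_acl)
                        return;                         /* inode has no EA block */
                bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
                if (!bh)
                        return;                         /* read failed */
                if (ext2_xattr_header_valid(HDR(bh)))
                        ext2_xattr_release_block(inode, bh);
                brelse(bh);
                EXT2_I(inode)->i_file_acl = 0;          /* assumed cleanup */
        }
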
872 ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
874 __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
877 error = mb_cache_entry_create(cache, GFP_KERNEL, hash, bh->b_blocknr,
881 ea_bdebug(bh, "already in cache");
885 ea_bdebug(bh, "inserting [%x]", (int)hash);
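
Lines 872-888 publish a block into the mbcache, keyed by the header's content hash with the block number as the value. mb_cache_entry_create() returning -EBUSY means an entry for this block already exists, which the "already in cache" branch above treats as success. A hypothetical wrapper mirroring that behavior:

        static int ea_cache_insert_sketch(struct mb_cache *cache,
                                          struct buffer_head *bh)
        {
                __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
                int error;

                /* key = content hash, value = block number, reusable entry */
                error = mb_cache_entry_create(cache, GFP_KERNEL, hash,
                                              bh->b_blocknr, true);
                if (error == -EBUSY)
                        error = 0;              /* someone cached it first: fine */
                return error;
        }
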
950 struct buffer_head *bh;
952 bh = sb_bread(inode->i_sb, ce->e_value);
953 if (!bh) {
958 lock_buffer(bh);
959 if (le32_to_cpu(HDR(bh)->h_refcount) >
963 le32_to_cpu(HDR(bh)->h_refcount),
965 } else if (!ext2_xattr_cmp(header, HDR(bh))) {
966 ea_bdebug(bh, "b_count=%d",
967 atomic_read(&(bh->b_count)));
970 return bh;
972 unlock_buffer(bh);
973 brelse(bh);
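
Lines 950-973 are the consumer of that cache: for each candidate the block is reread, and under the buffer lock the code checks both that the refcount is still below the sharing cap and that the contents really match (ext2_xattr_cmp); since hash collisions are possible, the compare is mandatory. On success the buffer is returned still locked (line 970), so the caller can take its reference before anyone else intervenes. A condensed sketch (ea_cache_check_sketch is hypothetical; EXT2_XATTR_REFCOUNT_MAX is the sharing limit defined in the same file):

        static struct buffer_head *
        ea_cache_check_sketch(struct inode *inode,
                              struct ext2_xattr_header *header,
                              struct mb_cache_entry *ce)
        {
                struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_value);

                if (!bh)
                        return NULL;            /* cached block unreadable */
                lock_buffer(bh);
                if (le32_to_cpu(HDR(bh)->h_refcount) <= EXT2_XATTR_REFCOUNT_MAX &&
                    !ext2_xattr_cmp(header, HDR(bh)))
                        return bh;              /* match: returned locked */
                unlock_buffer(bh);
                brelse(bh);
                return NULL;
        }
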