Lines matching +full:9 +full:k -- identifier search over the bcachefs read path, grouped by function below. The left-hand numbers are the source file's own line numbers; [k: argument] / [k: local] marks the line where k is declared in each function, all other lines are uses.
should_promote():
    92  static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,    [k: argument]
   102          if (bch2_bkey_has_target(c, k, opts.promote_target))
   105          if (bkey_extent_is_unwritten(k))
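
Taken together, these are the promotion gate: an extent that already lives on the promote target, or that is unwritten (space allocated, no data yet), is never promoted on read. A minimal userspace sketch of that decision shape; the struct, its fields, and the errno choices are illustrative stand-ins, not the kernel's:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct extent {
            bool on_promote_target;     /* already cached on the target? */
            bool unwritten;             /* allocated but never written?  */
    };

    /* Mirrors the gate in should_promote(): 0 = promote, -errno = skip. */
    static int should_promote(const struct extent *e, unsigned promote_target)
    {
            if (!promote_target)            /* no cache target configured */
                    return -ENOENT;
            if (e->on_promote_target)       /* line 102: nothing to do */
                    return -EEXIST;
            if (e->unwritten)               /* line 105: no data to cache */
                    return -ENODATA;
            return 0;
    }

    int main(void)
    {
            struct extent e = { false, false };
            printf("promote? %d\n", should_promote(&e, 1));   /* 0: yes */
            return 0;
    }
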
__promote_alloc():
   161          struct bkey_s_c k,    [k: argument]
   201          if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
   227                  btree_id, k);
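
The recurring sectors << 9 converts a count of 512-byte block-layer sectors into bytes before sizing the bio (block-layer sectors are 512 bytes regardless of the device's block size). Trivially:

    #include <stdio.h>

    #define SECTOR_SHIFT 9      /* 512-byte block-layer sectors */

    int main(void)
    {
            unsigned sectors = 16;
            /* Same conversion as sectors << 9 in __promote_alloc() line 201. */
            size_t bytes = (size_t)sectors << SECTOR_SHIFT;
            printf("%u sectors = %zu bytes\n", sectors, bytes);   /* 8192 */
            return 0;
    }
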
promote_alloc():
   254          struct bkey_s_c k,    [k: argument]
   269                  ? bkey_start_pos(k.k)
   270                  : POS(k.k->p.inode, iter.bi_sector);
   274          ret = should_promote(c, k, pos, opts, flags);
   279          k.k->type == KEY_TYPE_reflink_v
   282                  k, pos, pick, opts, sectors, rbio);
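
Lines 269-270 pick where promotion begins: at the extent's own start (bkey_start_pos(k.k)) when the whole extent is being promoted, or at the sector the read is currently positioned on otherwise. The selection in isolation, with a hypothetical promote_full flag:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pos { uint64_t inode, offset; };   /* stand-in for struct bpos */

    static struct pos promote_start(bool promote_full, struct pos extent_start,
                                    uint64_t inode, uint64_t bi_sector)
    {
            /* Whole-extent promotion starts at the extent; partial promotion
             * starts at the current bio sector, as in lines 269-270. */
            return promote_full ? extent_start
                                : (struct pos){ inode, bi_sector };
    }

    int main(void)
    {
            struct pos p = promote_start(false, (struct pos){ 1, 0 }, 1, 24);
            printf("start at %llu:%llu\n", (unsigned long long)p.inode,
                   (unsigned long long)p.offset);   /* 1:24 */
            return 0;
    }
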
bch2_read_retry_nodecode():
   372          struct bkey_s_c k;    [k: local]
   385          k = bch2_btree_iter_peek_slot(&iter);
   386          if (bkey_err(k))
   389          bch2_bkey_buf_reassemble(&sk, c, k);
   390          k = bkey_i_to_s_c(sk.k);
   393          if (!bch2_bkey_matches_ptr(c, k,
   405                  k, 0, failed, flags);
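
The retry path re-walks the btree, copies the key it finds into a private buffer (bch2_bkey_buf_reassemble()) so it stays valid across unlocks, and only reissues the read if the key still points at the data originally read (bch2_bkey_matches_ptr()). The revalidate-before-retry pattern, reduced to a toy:

    #include <stdio.h>

    struct key { unsigned dev, gen; };   /* toy stand-in for a bkey pointer */

    /* Stub lookup standing in for bch2_btree_iter_peek_slot(). */
    static struct key lookup_current(void)
    {
            return (struct key){ .dev = 0, .gen = 7 };
    }

    /* Copy the current index entry (the role bch2_bkey_buf_reassemble()
     * plays), then reissue only if it still matches what we first read. */
    static int retry_read(struct key orig)
    {
            struct key copy = lookup_current();

            if (copy.dev != orig.dev || copy.gen != orig.gen)
                    return -1;           /* extent moved or was overwritten */
            return 0;                    /* safe to reissue the read */
    }

    int main(void)
    {
            printf("%d\n", retry_read((struct key){ .dev = 0, .gen = 7 }));
            return 0;
    }
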
__bch2_rbio_narrow_crcs():
   483          struct bkey_s_c k;    [k: local]
   489          k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
   491          if ((ret = bkey_err(k)))
   494          if (bversion_cmp(k.k->version, rbio->version) ||
   495              !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
   499          if (bkey_start_offset(k.k) < data_offset ||
   500              k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
   505                  bkey_start_offset(k.k) - data_offset, k.k->size,
   515          new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
   520          bkey_reassemble(new, k);
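
CRC narrowing is abandoned if the key has been rewritten since the I/O was issued (the version and pointer checks on 494-495) or if the live key is not fully contained in the region the checksum covers (499-500). The containment condition in isolation, with sector units assumed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A checksum covering [data_offset, data_offset + csum_sectors) can only
     * be narrowed to an extent [start, end) that lies entirely inside it --
     * the inverse of the bailout on lines 499-500. */
    static bool can_narrow(uint64_t start, uint64_t end,
                           uint64_t data_offset, uint64_t csum_sectors)
    {
            return start >= data_offset && end <= data_offset + csum_sectors;
    }

    int main(void)
    {
            printf("%d\n", can_narrow(10, 18, 8, 16));   /* 1: inside [8, 24) */
            printf("%d\n", can_narrow(4, 18, 8, 16));    /* 0: starts too early */
            return 0;
    }
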
__bch2_read_endio():
   558          src->bi_iter.bi_size = crc.compressed_size << 9;
   596          nonce = nonce_add(nonce, crc.offset << 9);
   597          bio_advance(src, crc.offset << 9);
   652                  rbio->read_pos.offset << 9,
   661                  rbio->read_pos.offset << 9,
   667                  rbio->read_pos.offset << 9,
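
crc.offset is the distance, in sectors, from the start of the checksummed (and possibly encrypted) region to the data actually wanted, so the cipher nonce and the bio cursor must both be advanced by the same byte count before decrypting. A toy model of the bookkeeping; a real nonce is not a plain counter, so treat the addition here as purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            uint64_t nonce      = 0x1000;   /* toy nonce counter        */
            uint64_t bio_cursor = 0;        /* bytes consumed from bio  */
            unsigned crc_offset = 3;        /* sectors into csum region */

            uint64_t skip = (uint64_t)crc_offset << SECTOR_SHIFT;   /* 1536 */
            nonce      += skip;   /* like nonce_add(nonce, crc.offset << 9) */
            bio_cursor += skip;   /* like bio_advance(src, crc.offset << 9) */

            printf("skip %llu bytes, nonce %#llx, cursor %llu\n",
                   (unsigned long long)skip, (unsigned long long)nonce,
                   (unsigned long long)bio_cursor);
            return 0;
    }
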
__bch2_read_indirect_extent():
   726          struct bkey_s_c k;    [k: local]
   730          reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
   733          k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
   735          ret = bkey_err(k);
   739          if (k.k->type != KEY_TYPE_reflink_v &&
   740              k.k->type != KEY_TYPE_indirect_inline_data) {
   742                  orig_k->k->k.p.inode,
   743                  orig_k->k->k.p.offset << 9,
   745                  orig_k->k->k.p.offset,
   746                  orig_k->k->k.size,
   753          *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
   754          bch2_bkey_buf_reassemble(orig_k, trans->c, k);
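
The reflink indirection is pure offset arithmetic: the reflink_p key stores an index into the shared reflink btree, the lookup target is that index plus the offset within the logical extent (line 730), and once the indirect extent is found the offset is recomputed relative to its start (line 753). Worked through with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t idx                = 1000;  /* reflink_p ->v.idx        */
            uint64_t offset_into_extent = 24;    /* offset in logical extent */

            uint64_t reflink_offset = idx + offset_into_extent;     /* 1024 */

            /* Say the lookup lands in an indirect extent [992, 1056). */
            uint64_t indirect_start = 992;       /* bkey_start_offset(k.k)   */
            offset_into_extent = reflink_offset - indirect_start;   /* 32 */

            printf("lookup at %llu, offset into indirect extent %llu\n",
                   (unsigned long long)reflink_offset,
                   (unsigned long long)offset_into_extent);
            return 0;
    }
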
read_from_stale_dirty_pointer():
   761          struct bkey_s_c k,    [k: argument]
   778          bch2_bkey_val_to_text(&buf, c, k);
   783          ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
   786          bch2_bkey_val_to_text(&buf, c, k);
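
lockrestart_do() on line 783 is the bcachefs idiom for evaluating an expression inside a btree transaction and re-running it whenever the transaction had to drop its locks and restart. A toy model of the control flow, using a GCC statement expression as the kernel does (the restart code and peek() stub are invented):

    #include <stdio.h>

    #define RESTART (-1)   /* invented stand-in for a transaction-restart errno */

    #define lockrestart_do(expr) ({         \
            int _ret;                       \
            do {                            \
                    _ret = (expr);          \
            } while (_ret == RESTART);      \
            _ret;                           \
    })

    static int attempts;
    static int peek(void)   /* fails twice, then succeeds */
    {
            return attempts++ < 2 ? RESTART : 0;
    }

    int main(void)
    {
            printf("ret=%d after %d attempts\n", lockrestart_do(peek()), attempts);
            return 0;
    }
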
__bch2_read_extent():
   797          enum btree_id data_btree, struct bkey_s_c k,    [k: argument]
   807          struct bpos data_pos = bkey_start_pos(k.k);
   810          if (bkey_extent_is_inline_data(k.k)) {
   812                  bkey_inline_data_bytes(k.k));
   815                  memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
   822          pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
   830          read_pos.inode, read_pos.offset << 9,
   846          read_from_stale_dirty_pointer(trans, k, pick.ptr);
   865          iter.bi_size = pick.crc.compressed_size << 9;
   874          bch2_can_narrow_extent_crcs(k, pick.crc);
   879          EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
   892          promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
   921          pick.crc.compressed_size << 9);
   923          pick.crc.compressed_size << 9;
   934          bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
   974          rbio->devs_have = bch2_bkey_devs(k);
   980          rbio->version = k.k->version;
  1011          read_pos.offset << 9,
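
Two shapes are visible here: the inline-data fast path (810-815), where a small extent's payload is copied straight out of the key with no device I/O, and bounce-buffer sizing for compressed extents (865, 921-934), where the read must cover the entire compressed payload before anything can be decompressed, however little the caller asked for. The sizing rule, assuming 512-byte sectors:

    #include <stdio.h>

    #define SECTOR_SHIFT 9

    /* A compressed extent cannot be read partially: the bounce buffer must
     * hold the whole compressed payload -- hence
     * iter.bi_size = pick.crc.compressed_size << 9 on line 865. */
    static size_t bounce_bytes(unsigned compressed_size_sectors)
    {
            return (size_t)compressed_size_sectors << SECTOR_SHIFT;
    }

    int main(void)
    {
            unsigned want_sectors = 1;   /* caller wants 512 bytes...   */
            unsigned compressed   = 8;   /* ...from an 8-sector payload */
            printf("want %u sectors, bounce %zu bytes\n",
                   want_sectors, bounce_bytes(compressed));   /* 4096 */
            return 0;
    }
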
__bch2_read():
  1099          struct bkey_s_c k;    [k: local]
  1132          k = bch2_btree_iter_peek_slot(&iter);
  1133          ret = bkey_err(k);
  1138                  bkey_start_offset(k.k);
  1139          sectors = k.k->size - offset_into_extent;
  1141          bch2_bkey_buf_reassemble(&sk, c, k);
  1148          k = bkey_i_to_s_c(sk.k);
  1154          sectors = min(sectors, k.k->size - offset_into_extent);
  1156          bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
  1163          data_btree, k,
  1191          bvec_iter.bi_sector << 9,
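
The top-level loop advances extent by extent: compute how far into the current extent the read position sits (1138), how many sectors of it remain (1139, re-clamped on 1154 after the indirect-extent lookup may have shrunk it), and issue min(remaining, what the bio still wants) converted to bytes (1156). The chunking arithmetic on its own:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            uint64_t pos_sector   = 100;  /* current read position        */
            uint64_t extent_start = 96;   /* bkey_start_offset(k.k)       */
            uint64_t extent_size  = 32;   /* k.k->size, in sectors        */
            uint64_t bio_sectors  = 12;   /* bvec_iter_sectors(bvec_iter) */

            uint64_t offset_into_extent = pos_sector - extent_start;      /* 4    */
            uint64_t sectors = extent_size - offset_into_extent;          /* 28   */
            uint64_t bytes   = MIN(sectors, bio_sectors) << SECTOR_SHIFT; /* 6144 */

            printf("offset %llu, sectors %llu, bytes %llu\n",
                   (unsigned long long)offset_into_extent,
                   (unsigned long long)sectors, (unsigned long long)bytes);
            return 0;
    }
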