Lines matching "+full:9 +full:k" in the bcachefs read path (fs/bcachefs/io_read.c)

in should_promote() [k: argument]:
   93  static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
  105          if (bch2_bkey_has_target(c, k, opts.promote_target))
  108          if (bkey_extent_is_unwritten(k))
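
The two guards above end promotion early: data that is already on the promote target need not be cached again, and an unwritten extent has nothing to copy. A minimal sketch of that predicate shape, using illustrative stub types rather than the real struct bch_fs / struct bkey_s_c:

    #include <errno.h>
    #include <stdbool.h>

    /* Illustrative stubs; the real checks operate on bcachefs's own types. */
    struct opts_sketch   { unsigned promote_target; };
    struct extent_sketch { bool unwritten; unsigned targets; };

    /* Shape of should_promote(): refuse work that is pointless (already
     * on the promote target) or impossible (extent not yet written). */
    static int should_promote_sketch(const struct extent_sketch *e,
                                     struct opts_sketch opts)
    {
            if (!opts.promote_target)
                    return -ENOENT;    /* no cache target configured */
            if (e->targets & opts.promote_target)
                    return -EEXIST;    /* already on the target */
            if (e->unwritten)
                    return -ENODATA;   /* nothing to copy yet */
            return 0;
    }

    int main(void)
    {
            struct extent_sketch e = { .unwritten = false, .targets = 0 };
            struct opts_sketch o   = { .promote_target = 2 };

            return should_promote_sketch(&e, o);   /* 0: promotion would proceed */
    }
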
in __promote_alloc() [k: argument]:
  165                  struct bkey_s_c k,
  206          if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
  233          struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
  246                          btree_id, k);
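
bch2_bio_alloc_pages() takes a size in bytes while the read path counts 512-byte sectors, hence sectors << 9 (512 = 2^9). The conversion on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* A sector is 512 = 1 << 9 bytes; the "<< 9" and ">> 9" throughout
     * this file convert between sectors and bytes. */
    static inline uint64_t sectors_to_bytes(uint64_t sectors) { return sectors << 9; }
    static inline uint64_t bytes_to_sectors(uint64_t bytes)   { return bytes >> 9; }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)sectors_to_bytes(8));   /* 4096 */
            return 0;
    }
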
in promote_alloc() [k: argument]:
  274                  struct bkey_s_c k,
  296                  ? bkey_start_pos(k.k)
  297                  : POS(k.k->p.inode, iter.bi_sector);
  301          ret = should_promote(c, k, pos, opts, flags, failed);
  306                  k.k->type == KEY_TYPE_reflink_v
  309                          k, pos, pick, opts, sectors, rbio, failed);
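
Lines 296-297 choose what to promote: the whole extent from its start position, or only the sector range this read touched; line 306 routes reflink'd data to the reflink btree instead of the extents btree. A sketch of the position choice, with stand-ins for struct bpos and POS():

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for struct bpos and the POS() constructor. */
    struct bpos_sketch { uint64_t inode, offset; };
    #define POS_SKETCH(i, o) ((struct bpos_sketch){ .inode = (i), .offset = (o) })

    /* Promote the whole extent, or just the range this read touched,
     * mirroring the ?: choice in promote_alloc() above. */
    static struct bpos_sketch promote_pos(bool whole_extent,
                                          struct bpos_sketch extent_start,
                                          uint64_t inode, uint64_t bi_sector)
    {
            return whole_extent ? extent_start : POS_SKETCH(inode, bi_sector);
    }

    int main(void)
    {
            struct bpos_sketch p = promote_pos(false, POS_SKETCH(1, 96), 1, 100);

            return p.offset == 100 ? 0 : 1;   /* partial promote starts at sector 100 */
    }
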
in bch2_read_retry_nodecode() [k: local]:
  399          struct bkey_s_c k;
  413          ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
  417          bch2_bkey_buf_reassemble(&sk, c, k);
  418          k = bkey_i_to_s_c(sk.k);
  420          if (!bch2_bkey_matches_ptr(c, k,
  432                          k, 0, failed, flags);
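
lockrestart_do() reruns its expression until it completes without a btree transaction restart, and bkey_err() converts an error key into an errno; the key is then copied into a private buffer (sk, line 417) so it stays valid once btree locks are dropped. The control-flow shape, with a hypothetical restart code standing in for bcachefs's transaction-restart errors:

    #include <errno.h>

    #define ERESTART_SKETCH 512   /* hypothetical transaction-restart code */

    /* Shape of lockrestart_do(): repeat the operation until it finishes
     * without a transaction restart, then report any real error. */
    static int lockrestart_do_sketch(int (*op)(void *), void *arg)
    {
            int ret;

            do {
                    ret = op(arg);
            } while (ret == -ERESTART_SKETCH);

            return ret;   /* 0 on success, or a real errno */
    }

    /* Fails with a restart on the first call, succeeds on the second. */
    static int fail_once(void *arg)
    {
            int *calls = arg;

            return (*calls)++ ? 0 : -ERESTART_SKETCH;
    }

    int main(void)
    {
            int calls = 0;

            return lockrestart_do_sketch(fail_once, &calls);   /* 0 */
    }
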
in __bch2_rbio_narrow_crcs() [k: local]:
  510          struct bkey_s_c k;
  516          k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
  518          if ((ret = bkey_err(k)))
  521          if (bversion_cmp(k.k->bversion, rbio->version) ||
  522              !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
  526          if (bkey_start_offset(k.k) < data_offset ||
  527              k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
  532                          bkey_start_offset(k.k) - data_offset, k.k->size,
  542          new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
  547          bkey_reassemble(new, k);
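
Narrowing a checksum is only valid if the live extent still lies entirely within the region the existing checksum covers; that is the containment test on lines 526-527, with all quantities in sectors. In isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* The extent [start, end) may take a narrowed checksum only if it is
     * contained in the checksummed region beginning at data_offset and
     * spanning uncompressed_size sectors. */
    static bool extent_within_csum_region(uint64_t start, uint64_t end,
                                          uint64_t data_offset,
                                          uint64_t uncompressed_size)
    {
            return start >= data_offset &&
                   end <= data_offset + uncompressed_size;
    }

    int main(void)
    {
            /* extent [100, 112) inside a 32-sector region starting at 96 */
            return extent_within_csum_region(100, 112, 96, 32) ? 0 : 1;
    }
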
in __bch2_read_endio():
  584                  src->bi_iter.bi_size = crc.compressed_size << 9;
  622                  nonce = nonce_add(nonce, crc.offset << 9);
  623                  bio_advance(src, crc.offset << 9);
  680                          rbio->read_pos.offset << 9,
  689                          rbio->read_pos.offset << 9,
  695                          rbio->read_pos.offset << 9,
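
When a read bounces, the data the caller wants may begin crc.offset sectors into the bounce buffer: bio_advance() skips those bytes, and under encryption nonce_add() must advance the keystream position by the same byte count so decryption stays aligned. A sketch assuming a ChaCha-style 64-byte block counter in the first nonce word (an assumption, not the real struct nonce layout):

    #include <assert.h>
    #include <stdint.h>

    #define CHACHA_BLOCK_SIZE 64

    /* Hypothetical nonce with the block counter assumed in word 0.
     * Skipping `bytes` of ciphertext skips bytes / 64 keystream blocks,
     * so the offset must be block aligned. */
    struct nonce_sketch { uint32_t d[4]; };

    static struct nonce_sketch nonce_add_sketch(struct nonce_sketch n, unsigned bytes)
    {
            assert(bytes % CHACHA_BLOCK_SIZE == 0);
            n.d[0] += bytes / CHACHA_BLOCK_SIZE;
            return n;
    }

    int main(void)
    {
            struct nonce_sketch n = { { 0, 1, 2, 3 } };

            n = nonce_add_sketch(n, 8 << 9);   /* skip 8 sectors = 4096 bytes */
            return n.d[0] == 64 ? 0 : 1;       /* 4096 / 64 = 64 blocks */
    }
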
in __bch2_read_indirect_extent() [k: local]:
  758          struct bkey_s_c k;
  762          reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
  765          k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
  767          ret = bkey_err(k);
  771          if (k.k->type != KEY_TYPE_reflink_v &&
  772              k.k->type != KEY_TYPE_indirect_inline_data) {
  774                          orig_k->k->k.p.inode,
  775                          orig_k->k->k.p.offset << 9,
  777                          orig_k->k->k.p.offset,
  778                          orig_k->k->k.size,
  785          *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
  786          bch2_bkey_buf_reassemble(orig_k, trans->c, k);
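
A reflink pointer stores idx, the position of its data in the reflink btree: adding the read's offset within the pointer gives the position to look up (line 762), and subtracting the start offset of the indirect extent found there gives the new offset into that extent (line 785). With example numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t idx                = 1000;   /* reflink_p->v.idx (example value) */
            uint64_t offset_into_extent = 24;     /* sectors into the reflink pointer */
            uint64_t reflink_offset     = idx + offset_into_extent;   /* 1024 */

            uint64_t indirect_start     = 1016;   /* bkey_start_offset(k.k) (example) */

            printf("lookup at %llu, new offset_into_extent %llu\n",
                   (unsigned long long)reflink_offset,
                   (unsigned long long)(reflink_offset - indirect_start));   /* 1024, 8 */
            return 0;
    }
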
in read_from_stale_dirty_pointer() [k: argument]:
  794                  struct bkey_s_c k,
  811          bch2_bkey_val_to_text(&buf, c, k);
  816          ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
  819          bch2_bkey_val_to_text(&buf, c, k);
  829          bch2_bkey_val_to_text(&buf, c, k);
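
The three bch2_bkey_val_to_text() calls build one diagnostic in a printbuf, rendering the key with the stale pointer and then the key currently in the btree, so the whole message can be emitted at once. The accumulate-then-emit pattern, with snprintf into a fixed buffer standing in for the printbuf helpers:

    #include <stdio.h>

    /* Accumulate a multi-part message, then emit it once. */
    struct printbuf_sketch { char buf[256]; int pos; };

    static void pb_add(struct printbuf_sketch *pb, const char *label, const char *key)
    {
            if (pb->pos < 0 || pb->pos >= (int)sizeof(pb->buf))
                    return;   /* buffer full; drop the rest */
            pb->pos += snprintf(pb->buf + pb->pos, sizeof(pb->buf) - pb->pos,
                                "%s: %s\n", label, key);
    }

    int main(void)
    {
            struct printbuf_sketch pb = { .pos = 0 };

            pb_add(&pb, "stale dirty pointer", "u64s 8 type extent 1:8 gen 3");
            pb_add(&pb, "key in btree",        "u64s 8 type extent 1:8 gen 4");
            fputs(pb.buf, stdout);
            return 0;
    }
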
in __bch2_read_extent() [k: argument]:
  841                  enum btree_id data_btree, struct bkey_s_c k,
  850          struct bpos data_pos = bkey_start_pos(k.k);
  853          if (bkey_extent_is_inline_data(k.k)) {
  855                          bkey_inline_data_bytes(k.k));
  858                  memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
  865          pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
  873                  bch2_bkey_val_to_text(&buf, c, k);
  876                          read_pos.inode, read_pos.offset << 9,
  896                  read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
  919                  iter.bi_size = pick.crc.compressed_size << 9;
  928                  bch2_can_narrow_extent_crcs(k, pick.crc);
  933          EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
  946          promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
  975                                  pick.crc.compressed_size << 9);
  977                          pick.crc.compressed_size << 9;
  988          bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
 1028          rbio->devs_have = bch2_bkey_devs(k);
 1034          rbio->version = k.k->bversion;
 1068                          read_pos.offset << 9,
 1095          if (bch2_ec_read_extent(trans, rbio, k)) {
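
Line 853 starts the inline-data fast path: an extent small enough to keep its payload in the key value is satisfied by copying straight out of the key and zero-filling any remainder, with no I/O issued. A sketch with plain buffers standing in for the bio machinery:

    #include <stdint.h>
    #include <string.h>

    /* Copy inline data into the destination and zero the tail, mirroring
     * memcpy_to_bio() plus the zero fill in the original. */
    static void read_inline_data(uint8_t *dst, size_t dst_len,
                                 const uint8_t *inline_p, size_t inline_len)
    {
            size_t n = dst_len < inline_len ? dst_len : inline_len;

            memcpy(dst, inline_p, n);
            memset(dst + n, 0, dst_len - n);
    }

    int main(void)
    {
            uint8_t dst[8];
            const uint8_t val[4] = { 'd', 'a', 't', 'a' };

            read_inline_data(dst, sizeof(dst), val, sizeof(val));
            return (dst[0] == 'd' && dst[7] == 0) ? 0 : 1;
    }
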
in __bch2_read() [k: local]:
 1156          struct bkey_s_c k;
 1182                  k = bch2_btree_iter_peek_slot(&iter);
 1183                  ret = bkey_err(k);
 1188                          bkey_start_offset(k.k);
 1189                  sectors = k.k->size - offset_into_extent;
 1191                  bch2_bkey_buf_reassemble(&sk, c, k);
 1198                  k = bkey_i_to_s_c(sk.k);
 1204                  sectors = min(sectors, k.k->size - offset_into_extent);
 1206                  bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
 1213                                  data_btree, k,
 1237                          bvec_iter.bi_sector << 9,
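
Each pass of the read loop computes how far into the found extent the request begins (line 1188), how much of the extent remains (line 1189, re-clamped on line 1204 after indirect extents are resolved), and how many bytes this pass can submit (line 1206). With example numbers:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    int main(void)
    {
            uint64_t iter_pos     = 100;   /* current read position, in sectors */
            uint64_t extent_start = 96;    /* bkey_start_offset(k.k) (example) */
            uint64_t extent_size  = 16;    /* k.k->size (example) */
            uint64_t bvec_sectors = 32;    /* sectors left in the caller's buffer */

            uint64_t offset_into_extent = iter_pos - extent_start;    /* 4 */
            uint64_t sectors = extent_size - offset_into_extent;      /* 12 */
            uint64_t bytes   = min_u64(sectors, bvec_sectors) << 9;   /* 6144 */

            printf("offset %llu, sectors %llu, bytes %llu\n",
                   (unsigned long long)offset_into_extent,
                   (unsigned long long)sectors,
                   (unsigned long long)bytes);
            return 0;
    }
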