Lines matching full:k — every use of the identifier `k` in the bcachefs allocator code (the functions below are from fs/bcachefs/alloc_background.c). Each entry shows: the source line number, the matching code, and the enclosing function plus how `k` is used there (argument or local).

88 struct bkey_s_c k) in bch2_alloc_unpack_v1() argument
90 const struct bch_alloc *in = bkey_s_c_to_alloc(k).v; in bch2_alloc_unpack_v1()
102 struct bkey_s_c k) in bch2_alloc_unpack_v2() argument
104 struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k); in bch2_alloc_unpack_v2()
135 struct bkey_s_c k) in bch2_alloc_unpack_v3() argument
137 struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k); in bch2_alloc_unpack_v3()
170 static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k) in bch2_alloc_unpack() argument
174 switch (k.k->type) { in bch2_alloc_unpack()
176 bch2_alloc_unpack_v1(&ret, k); in bch2_alloc_unpack()
179 bch2_alloc_unpack_v2(&ret, k); in bch2_alloc_unpack()
182 bch2_alloc_unpack_v3(&ret, k); in bch2_alloc_unpack()
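
The group above is the version-dispatch path: one unpack helper per on-disk format, behind a single bch2_alloc_unpack() that switches on k.k->type (the switch at source line 174). A minimal compile-and-run sketch of that pattern; all types, fields and names here are invented stand-ins, not the real bcachefs structures:

        #include <stdint.h>
        #include <stdio.h>

        /* hypothetical simplified on-disk formats, oldest to newest */
        enum key_type { ALLOC_V1, ALLOC_V2, ALLOC_V3 };

        struct alloc_v1 { uint8_t gen; };
        struct alloc_v2 { uint8_t gen; uint32_t dirty_sectors; };
        struct alloc_v3 { uint8_t gen; uint32_t dirty_sectors; uint64_t journal_seq; };

        struct key {
                enum key_type type;
                const void *val;        /* points at one of the structs above */
        };

        /* one in-memory representation, regardless of on-disk version */
        struct alloc_unpacked {
                uint8_t  gen;
                uint32_t dirty_sectors;
                uint64_t journal_seq;
        };

        static struct alloc_unpacked alloc_unpack(struct key k)
        {
                struct alloc_unpacked ret = {0};

                switch (k.type) {
                case ALLOC_V1: {
                        const struct alloc_v1 *v = k.val;
                        ret.gen = v->gen;
                        break;
                }
                case ALLOC_V2: {
                        const struct alloc_v2 *v = k.val;
                        ret.gen           = v->gen;
                        ret.dirty_sectors = v->dirty_sectors;
                        break;
                }
                case ALLOC_V3: {
                        const struct alloc_v3 *v = k.val;
                        ret.gen           = v->gen;
                        ret.dirty_sectors = v->dirty_sectors;
                        ret.journal_seq   = v->journal_seq;
                        break;
                }
                }
                return ret;
        }

        int main(void)
        {
                struct alloc_v2 v2 = { .gen = 3, .dirty_sectors = 128 };
                struct key k = { .type = ALLOC_V2, .val = &v2 };
                struct alloc_unpacked u = alloc_unpack(k);

                printf("gen %u dirty_sectors %u\n",
                       (unsigned) u.gen, (unsigned) u.dirty_sectors);
                return 0;
        }

Fields missing from an older format simply stay zero in the unpacked form, which is why newer code can treat every key as if it carried the full set.
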
200 int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_alloc_v1_validate() argument
203 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); in bch2_alloc_v1_validate()
207 bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), in bch2_alloc_v1_validate()
210 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v)); in bch2_alloc_v1_validate()
215 int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_alloc_v2_validate() argument
221 bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), in bch2_alloc_v2_validate()
228 int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_alloc_v3_validate() argument
234 bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), in bch2_alloc_v3_validate()
241 int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_alloc_v4_validate() argument
247 bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k)); in bch2_alloc_v4_validate()
249 bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k), in bch2_alloc_v4_validate()
252 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k)); in bch2_alloc_v4_validate()
322 void bch2_alloc_v4_swab(struct bkey_s k) in bch2_alloc_v4_swab() argument
324 struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v; in bch2_alloc_v4_swab()
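
bch2_alloc_v4_swab() (source line 322) normalizes byte order when a key written on an opposite-endian machine is read back. A tiny illustrative sketch; the field names here are invented, not the actual bch_alloc_v4 layout:

        #include <stdint.h>
        #include <stdio.h>

        /* hypothetical value layout; only multi-byte fields need swapping */
        struct alloc_v4 {
                uint64_t journal_seq;
                uint32_t dirty_sectors;
                uint32_t cached_sectors;
                uint8_t  gen;           /* single byte: endian-neutral */
        };

        static void alloc_v4_swab(struct alloc_v4 *a)
        {
                a->journal_seq    = __builtin_bswap64(a->journal_seq);
                a->dirty_sectors  = __builtin_bswap32(a->dirty_sectors);
                a->cached_sectors = __builtin_bswap32(a->cached_sectors);
        }

        int main(void)
        {
                struct alloc_v4 a = { .journal_seq = 0x0102030405060708ULL };

                alloc_v4_swab(&a);
                printf("swabbed journal_seq: %#llx\n",
                       (unsigned long long) a.journal_seq);
                return 0;
        }
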
345 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) in bch2_alloc_to_text() argument
348 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); in bch2_alloc_to_text()
349 struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL; in bch2_alloc_to_text()
376 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out) in __bch2_alloc_to_v4() argument
378 if (k.k->type == KEY_TYPE_alloc_v4) { in __bch2_alloc_to_v4()
381 *out = *bkey_s_c_to_alloc_v4(k).v; in __bch2_alloc_to_v4()
392 struct bkey_alloc_unpacked u = bch2_alloc_unpack(k); in __bch2_alloc_to_v4()
413 __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k) in __bch2_alloc_to_v4_mut() argument
417 ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4))); in __bch2_alloc_to_v4_mut()
421 if (k.k->type == KEY_TYPE_alloc_v4) { in __bch2_alloc_to_v4_mut()
424 bkey_reassemble(&ret->k_i, k); in __bch2_alloc_to_v4_mut()
437 ret->k.p = k.k->p; in __bch2_alloc_to_v4_mut()
438 bch2_alloc_to_v4(k, &ret->v); in __bch2_alloc_to_v4_mut()
443 … struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k) in bch2_alloc_to_v4_mut_inlined() argument
447 if (likely(k.k->type == KEY_TYPE_alloc_v4) && in bch2_alloc_to_v4_mut_inlined()
448 ((a = bkey_s_c_to_alloc_v4(k), true) && in bch2_alloc_to_v4_mut_inlined()
450 return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4); in bch2_alloc_to_v4_mut_inlined()
452 return __bch2_alloc_to_v4_mut(trans, k); in bch2_alloc_to_v4_mut_inlined()
455 struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k) in bch2_alloc_to_v4_mut() argument
457 return bch2_alloc_to_v4_mut_inlined(trans, k); in bch2_alloc_to_v4_mut()
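
bch2_alloc_to_v4_mut_inlined() (source line 443) has a fast path: when the key is already KEY_TYPE_alloc_v4 (the likely() at line 447) it can be duplicated verbatim; otherwise __bch2_alloc_to_v4_mut() allocates room for the newest format and converts into it. A sketch with malloc standing in for the transaction allocator, all types invented:

        #include <stdint.h>
        #include <stdlib.h>
        #include <string.h>
        #include <stdio.h>

        enum key_type { ALLOC_V1, ALLOC_V4 };

        struct alloc_v1 { uint8_t gen; };
        struct alloc_v4 { uint8_t gen; uint32_t dirty_sectors; };

        struct key {
                enum key_type type;
                const void *val;
        };

        /* return a private, mutable copy in the newest format */
        static struct alloc_v4 *alloc_to_v4_mut(struct key k)
        {
                struct alloc_v4 *ret = calloc(1, sizeof(*ret));

                if (!ret)
                        return NULL;

                if (k.type == ALLOC_V4) {
                        /* fast path: already the newest format, copy verbatim */
                        memcpy(ret, k.val, sizeof(*ret));
                } else {
                        /* slow path: convert the old format field by field */
                        const struct alloc_v1 *old = k.val;

                        ret->gen = old->gen;
                }
                return ret;
        }

        int main(void)
        {
                struct alloc_v1 v1 = { .gen = 7 };
                struct key k = { ALLOC_V1, &v1 };
                struct alloc_v4 *a = alloc_to_v4_mut(k);

                if (a) {
                        printf("gen %u\n", (unsigned) a->gen);
                        free(a);
                }
                return 0;
        }
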
464 struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos, in bch2_trans_start_alloc_update_noupdate() local
468 int ret = bkey_err(k); in bch2_trans_start_alloc_update_noupdate()
472 struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); in bch2_trans_start_alloc_update_noupdate()
512 static unsigned alloc_gen(struct bkey_s_c k, unsigned offset) in alloc_gen() argument
514 return k.k->type == KEY_TYPE_bucket_gens in alloc_gen()
515 ? bkey_s_c_to_bucket_gens(k).v->gens[offset] in alloc_gen()
519 int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k, in bch2_bucket_gens_validate() argument
524 bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), in bch2_bucket_gens_validate()
527 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens)); in bch2_bucket_gens_validate()
532 void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) in bch2_bucket_gens_to_text() argument
534 struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k); in bch2_bucket_gens_to_text()
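
KEY_TYPE_bucket_gens packs many 8-bit generation numbers into a single key, which is why alloc_gen() (source line 512) indexes gens[offset] rather than reading a whole alloc key. The position arithmetic, assuming for illustration 512 generations per key (the real constant may differ):

        #include <stdint.h>
        #include <stdio.h>

        #define GENS_PER_KEY 512u       /* illustrative, not the real constant */

        /* one bucket_gens key covers GENS_PER_KEY consecutive buckets */
        struct bucket_gens {
                uint8_t gens[GENS_PER_KEY];
        };

        /* bucket number -> position of the bucket_gens key covering it */
        static uint64_t alloc_gens_pos(uint64_t bucket, unsigned *offset)
        {
                *offset = bucket % GENS_PER_KEY;
                return bucket / GENS_PER_KEY;
        }

        /* the reverse mapping, as bucket_gens_pos_to_alloc() does above */
        static uint64_t gens_pos_to_alloc(uint64_t pos, unsigned offset)
        {
                return pos * GENS_PER_KEY + offset;
        }

        int main(void)
        {
                unsigned offset;
                uint64_t pos = alloc_gens_pos(1234, &offset);

                printf("bucket 1234 -> gens key %llu, slot %u\n",
                       (unsigned long long) pos, offset);
                printf("round trip: %llu\n",
                       (unsigned long long) gens_pos_to_alloc(pos, offset));
                return 0;
        }
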
552 BTREE_ITER_prefetch, k, ({ in bch2_bucket_gens_init()
557 if (!bch2_dev_bucket_exists(c, k.k->p)) in bch2_bucket_gens_init()
561 u8 gen = bch2_alloc_to_v4(k, &a)->gen; in bch2_bucket_gens_init()
566 if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) { in bch2_bucket_gens_init()
576 g.k.p = pos; in bch2_bucket_gens_init()
604 BTREE_ITER_prefetch, k, ({ in bch2_alloc_read()
605 u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset; in bch2_alloc_read()
606 u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset; in bch2_alloc_read()
608 if (k.k->type != KEY_TYPE_bucket_gens) in bch2_alloc_read()
611 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
617 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
621 const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v; in bch2_alloc_read()
631 BTREE_ITER_prefetch, k, ({ in bch2_alloc_read()
632 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
638 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
642 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
643 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
647 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
648 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); in bch2_alloc_read()
653 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
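
bch2_alloc_read() fills the in-memory generation cache from the btree; the matches at source lines 642-648 show the bounds handling: positions below first_bucket are skipped forward, positions at or past nbuckets advance to the next device. A simplified sketch over flat arrays (all types invented):

        #include <stdint.h>
        #include <stdio.h>

        struct dev {
                uint64_t first_bucket;  /* buckets below this are unusable */
                uint64_t nbuckets;
                uint8_t  gens[16];      /* in-memory generation cache */
        };

        struct alloc_key {
                unsigned dev;
                uint64_t bucket;
                uint8_t  gen;
        };

        static void alloc_read(struct dev *devs, unsigned nr_devs,
                               const struct alloc_key *keys, unsigned nr_keys)
        {
                for (unsigned i = 0; i < nr_keys; i++) {
                        const struct alloc_key *k = &keys[i];

                        if (k->dev >= nr_devs)
                                continue;       /* key for a device we don't have */

                        struct dev *ca = &devs[k->dev];

                        if (k->bucket < ca->first_bucket ||
                            k->bucket >= ca->nbuckets)
                                continue;       /* out of range for this device */

                        ca->gens[k->bucket] = k->gen;
                }
        }

        int main(void)
        {
                struct dev d = { .first_bucket = 2, .nbuckets = 16 };
                struct alloc_key keys[] = {
                        { 0, 1,  9 },   /* below first_bucket: ignored */
                        { 0, 5,  3 },   /* stored */
                        { 0, 99, 7 },   /* past nbuckets: ignored */
                };

                alloc_read(&d, 1, keys, 3);
                printf("gen[1]=%u gen[5]=%u\n",
                       (unsigned) d.gens[1], (unsigned) d.gens[5]);
                return 0;
        }
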
676 struct bkey_i *k; in bch2_bucket_do_index() local
687 k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k)); in bch2_bucket_do_index()
688 if (IS_ERR(k)) in bch2_bucket_do_index()
689 return PTR_ERR(k); in bch2_bucket_do_index()
691 bkey_init(&k->k); in bch2_bucket_do_index()
692 k->k.type = new_type; in bch2_bucket_do_index()
697 k->k.p = alloc_freespace_pos(alloc_k.k->p, *a); in bch2_bucket_do_index()
698 bch2_key_resize(&k->k, 1); in bch2_bucket_do_index()
702 k->k.p = alloc_k.k->p; in bch2_bucket_do_index()
709 bkey_start_pos(&k->k), in bch2_bucket_do_index()
717 bch2_trans_inconsistent_on(old.k->type != old_type, trans, in bch2_bucket_do_index()
724 bch2_bkey_types[old.k->type], in bch2_bucket_do_index()
731 ret = bch2_trans_update(trans, &iter, k, 0); in bch2_bucket_do_index()
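
bch2_bucket_do_index() keeps the secondary indexes (freespace, needs_discard) in sync with the alloc keys: it builds the index key for the new state and, while updating, verifies the old entry had the expected type — the bch2_trans_inconsistent_on() at source line 717. A sketch of that check-then-update shape with a plain bitmap as the "index"; everything here is illustrative:

        #include <stdio.h>
        #include <stdbool.h>

        #define NBUCKETS 8

        /* stand-in for the freespace btree: one bit per bucket */
        static bool freespace[NBUCKETS];

        /*
         * Flip a bucket's index entry, checking that the old state matches
         * what the caller believes: a mismatch means the index and the
         * alloc info disagree, i.e. filesystem inconsistency.
         */
        static int bucket_do_index(unsigned bucket, bool expect_old, bool set)
        {
                if (freespace[bucket] != expect_old) {
                        fprintf(stderr,
                                "inconsistency: bucket %u index is %d, expected %d\n",
                                bucket, freespace[bucket], expect_old);
                        return -1;
                }
                freespace[bucket] = set;
                return 0;
        }

        int main(void)
        {
                /* bucket 3 becomes free: index entry goes absent -> present */
                int ret = bucket_do_index(3, false, true);

                /* doing it again trips the consistency check */
                ret = ret ?: bucket_do_index(3, false, true);
                printf("second update: %d\n", ret);
                return 0;
        }
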
745 struct bkey_s_c k; in bch2_bucket_gen_update() local
753 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos, in bch2_bucket_gen_update()
756 ret = bkey_err(k); in bch2_bucket_gen_update()
760 if (k.k->type != KEY_TYPE_bucket_gens) { in bch2_bucket_gen_update()
762 g->k.p = iter.pos; in bch2_bucket_gen_update()
764 bkey_reassemble(&g->k_i, k); in bch2_bucket_gen_update()
838 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc()
846 if (likely(new.k->type == KEY_TYPE_alloc_v4)) { in bch2_trigger_alloc()
870 !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) { in bch2_trigger_alloc()
892 ret = bch2_lru_change(trans, new.k->p.inode, in bch2_trigger_alloc()
893 bucket_to_u64(new.k->p), in bch2_trigger_alloc()
904 bucket_to_u64(new.k->p), in bch2_trigger_alloc()
911 ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen); in bch2_trigger_alloc()
937 new.k->type == KEY_TYPE_alloc_v4) { in bch2_trigger_alloc()
957 new.k->p.inode, new.k->p.offset, in bch2_trigger_alloc()
966 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
984 !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) && in bch2_trigger_alloc()
986 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
989 !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) && in bch2_trigger_alloc()
999 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
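
bch2_trigger_alloc() is the transactional trigger: it runs whenever an alloc key changes and derives the side effects from the old/new pair (LRU movement around source line 892, the bucket_gens update at 911, discard queueing at 986). A compressed sketch of that compare-and-react shape; the fields and the exact set of reactions are invented for illustration:

        #include <stdint.h>
        #include <stdio.h>

        struct alloc {
                uint8_t  gen;
                uint8_t  data_type;     /* free, cached, user, ... */
                uint64_t read_time;     /* LRU key */
                uint8_t  needs_discard;
        };

        /* react to the difference between pre- and post-update values */
        static void trigger_alloc(const struct alloc *old, const struct alloc *new)
        {
                if (old->data_type != new->data_type)
                        printf("data_type %u -> %u: adjust usage counters\n",
                               (unsigned) old->data_type, (unsigned) new->data_type);

                if (old->read_time != new->read_time)
                        printf("read_time changed: move bucket in the LRU\n");

                if (old->gen != new->gen)
                        printf("gen %u -> %u: update the bucket_gens key\n",
                               (unsigned) old->gen, (unsigned) new->gen);

                if (!old->needs_discard && new->needs_discard)
                        printf("queue bucket for discard\n");
        }

        int main(void)
        {
                struct alloc old = { .gen = 4, .data_type = 2, .read_time = 100 };
                struct alloc new = { .gen = 5, .data_type = 0, .needs_discard = 1 };

                trigger_alloc(&old, &new);
                return 0;
        }
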
1025 struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); in bch2_get_key_or_hole() local
1027 if (bkey_err(k)) in bch2_get_key_or_hole()
1028 return k; in bch2_get_key_or_hole()
1030 if (k.k->type) { in bch2_get_key_or_hole()
1031 return k; in bch2_get_key_or_hole()
1039 if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX)) in bch2_get_key_or_hole()
1040 end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p)); in bch2_get_key_or_hole()
1048 k = bch2_btree_iter_peek_upto(&iter2, end); in bch2_get_key_or_hole()
1054 if (bkey_err(k)) in bch2_get_key_or_hole()
1055 return k; in bch2_get_key_or_hole()
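
bch2_get_key_or_hole() returns either the key at the iterator position or, when the slot is empty (the !k.k->type case at source line 1030), a synthesized extent covering the whole run of missing keys, clamped to the end of the current btree node (lines 1039-1040). The hole synthesis, sketched over a boolean array:

        #include <stdint.h>
        #include <stdio.h>
        #include <stdbool.h>

        struct range {
                uint64_t start, end;    /* [start, end) */
                bool     is_hole;
        };

        /*
         * If `pos` holds a key, return just that slot; otherwise return
         * one range spanning every empty slot up to the next key (or n).
         */
        static struct range get_key_or_hole(const bool *present, uint64_t n,
                                            uint64_t pos)
        {
                if (present[pos])
                        return (struct range){ pos, pos + 1, false };

                uint64_t end = pos + 1;

                while (end < n && !present[end])
                        end++;
                return (struct range){ pos, end, true };
        }

        int main(void)
        {
                bool present[10] = { [2] = true, [7] = true };
                struct range r = get_key_or_hole(present, 10, 3);

                printf("hole=%d [%llu, %llu)\n", r.is_hole,
                       (unsigned long long) r.start,
                       (unsigned long long) r.end);
                return 0;
        }

Handing callers a whole hole at once lets them process a run of missing buckets in one step instead of iterating one empty slot at a time.
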
1095 struct bkey_s_c k; in bch2_get_key_or_real_bucket_hole() local
1097 k = bch2_get_key_or_hole(iter, POS_MAX, hole); in bch2_get_key_or_real_bucket_hole()
1098 if (bkey_err(k)) in bch2_get_key_or_real_bucket_hole()
1099 return k; in bch2_get_key_or_real_bucket_hole()
1101 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1103 if (!k.k->type) { in bch2_get_key_or_real_bucket_hole()
1104 struct bpos hole_start = bkey_start_pos(k.k); in bch2_get_key_or_real_bucket_hole()
1114 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1118 return k; in bch2_get_key_or_real_bucket_hole()
1134 struct bkey_s_c k; in bch2_check_alloc_key() local
1138 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key()
1142 alloc_k.k->p.inode, alloc_k.k->p.offset)) in bch2_check_alloc_key()
1153 bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p); in bch2_check_alloc_key()
1154 k = bch2_btree_iter_peek_slot(discard_iter); in bch2_check_alloc_key()
1155 ret = bkey_err(k); in bch2_check_alloc_key()
1159 if (fsck_err_on(k.k->type != discard_key_type, in bch2_check_alloc_key()
1163 bch2_bkey_types[k.k->type], in bch2_check_alloc_key()
1173 bkey_init(&update->k); in bch2_check_alloc_key()
1174 update->k.type = discard_key_type; in bch2_check_alloc_key()
1175 update->k.p = discard_iter->pos; in bch2_check_alloc_key()
1183 bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); in bch2_check_alloc_key()
1184 k = bch2_btree_iter_peek_slot(freespace_iter); in bch2_check_alloc_key()
1185 ret = bkey_err(k); in bch2_check_alloc_key()
1189 if (fsck_err_on(k.k->type != freespace_key_type, in bch2_check_alloc_key()
1193 bch2_bkey_types[k.k->type], in bch2_check_alloc_key()
1204 bkey_init(&update->k); in bch2_check_alloc_key()
1205 update->k.type = freespace_key_type; in bch2_check_alloc_key()
1206 update->k.p = freespace_iter->pos; in bch2_check_alloc_key()
1207 bch2_key_resize(&update->k, 1); in bch2_check_alloc_key()
1214 bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); in bch2_check_alloc_key()
1215 k = bch2_btree_iter_peek_slot(bucket_gens_iter); in bch2_check_alloc_key()
1216 ret = bkey_err(k); in bch2_check_alloc_key()
1220 if (fsck_err_on(a->gen != alloc_gen(k, gens_offset), in bch2_check_alloc_key()
1224 alloc_gen(k, gens_offset), a->gen, in bch2_check_alloc_key()
1234 if (k.k->type == KEY_TYPE_bucket_gens) { in bch2_check_alloc_key()
1235 bkey_reassemble(&g->k_i, k); in bch2_check_alloc_key()
1238 g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset); in bch2_check_alloc_key()
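
bch2_check_alloc_key() is the fsck counterpart of bch2_bucket_do_index(): for each alloc key it visits the matching slots in the need_discard, freespace and bucket_gens btrees and rewrites any entry whose type disagrees (the fsck_err_on() checks at source lines 1159 and 1189). A minimal sketch of check-then-repair against a derived bitmap; arrays and names are illustrative:

        #include <stdio.h>
        #include <stdbool.h>

        #define NBUCKETS 8

        /* primary records: does this bucket need a discard? */
        static const bool needs_discard[NBUCKETS] = { [1] = true, [4] = true };

        /* derived index, possibly stale */
        static bool discard_index[NBUCKETS]       = { [1] = true, [5] = true };

        static int check_alloc_keys(void)
        {
                int errors = 0;

                for (unsigned i = 0; i < NBUCKETS; i++) {
                        bool want = needs_discard[i];

                        if (discard_index[i] != want) {
                                /* report, then repair the index, not the primary */
                                printf("bucket %u: index %d != alloc info %d, fixing\n",
                                       i, discard_index[i], want);
                                discard_index[i] = want;
                                errors++;
                        }
                }
                return errors;
        }

        int main(void)
        {
                printf("%d inconsistencies repaired\n", check_alloc_keys());
                return 0;
        }

The direction of repair matters: the alloc key is authoritative, so fsck rewrites the derived index entries to match it.
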
1262 struct bkey_s_c k; in bch2_check_alloc_hole_freespace() local
1271 k = bch2_btree_iter_peek_slot(freespace_iter); in bch2_check_alloc_hole_freespace()
1272 ret = bkey_err(k); in bch2_check_alloc_hole_freespace()
1276 *end = bkey_min(k.k->p, *end); in bch2_check_alloc_hole_freespace()
1278 if (fsck_err_on(k.k->type != KEY_TYPE_set, in bch2_check_alloc_hole_freespace()
1292 bkey_init(&update->k); in bch2_check_alloc_hole_freespace()
1293 update->k.type = KEY_TYPE_set; in bch2_check_alloc_hole_freespace()
1294 update->k.p = freespace_iter->pos; in bch2_check_alloc_hole_freespace()
1295 bch2_key_resize(&update->k, in bch2_check_alloc_hole_freespace()
1315 struct bkey_s_c k; in bch2_check_alloc_hole_bucket_gens() local
1322 k = bch2_btree_iter_peek_slot(bucket_gens_iter); in bch2_check_alloc_hole_bucket_gens()
1323 ret = bkey_err(k); in bch2_check_alloc_hole_bucket_gens()
1331 if (k.k->type == KEY_TYPE_bucket_gens) { in bch2_check_alloc_hole_bucket_gens()
1335 bkey_reassemble(&g.k_i, k); in bch2_check_alloc_hole_bucket_gens()
1341 bucket_gens_pos_to_alloc(k.k->p, i).inode, in bch2_check_alloc_hole_bucket_gens()
1342 bucket_gens_pos_to_alloc(k.k->p, i).offset, in bch2_check_alloc_hole_bucket_gens()
1364 *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0)); in bch2_check_alloc_hole_bucket_gens()
1438 struct bkey_s_c k) in bch2_check_bucket_gens_key() argument
1442 u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset; in bch2_check_bucket_gens_key()
1443 u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset; in bch2_check_bucket_gens_key()
1449 BUG_ON(k.k->type != KEY_TYPE_bucket_gens); in bch2_check_bucket_gens_key()
1450 bkey_reassemble(&g.k_i, k); in bch2_check_bucket_gens_key()
1452 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key()
1456 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) in bch2_check_bucket_gens_key()
1465 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { in bch2_check_bucket_gens_key()
1509 struct bkey_s_c k; in bch2_check_alloc_info() local
1526 k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole); in bch2_check_alloc_info()
1527 ret = bkey_err(k); in bch2_check_alloc_info()
1531 if (!k.k) in bch2_check_alloc_info()
1534 if (k.k->type) { in bch2_check_alloc_info()
1535 next = bpos_nosnap_successor(k.k->p); in bch2_check_alloc_info()
1538 k, &iter, in bch2_check_alloc_info()
1545 next = k.k->p; in bch2_check_alloc_info()
1548 bkey_start_pos(k.k), in bch2_check_alloc_info()
1552 bkey_start_pos(k.k), in bch2_check_alloc_info()
1583 BTREE_ITER_prefetch, k, in bch2_check_alloc_info()
1592 k = bch2_btree_iter_peek(&iter); in bch2_check_alloc_info()
1593 if (!k.k) in bch2_check_alloc_info()
1596 ret = bkey_err(k) ?: in bch2_check_alloc_info()
1604 bch2_bkey_val_to_text(&buf, c, k); in bch2_check_alloc_info()
1619 BTREE_ITER_prefetch, k, in bch2_check_alloc_info()
1621 bch2_check_bucket_gens_key(trans, &iter, k)); in bch2_check_alloc_info()
1640 if (!alloc_k.k) in bch2_check_alloc_to_lru_ref()
1647 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref()
1685 ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ], in bch2_check_alloc_to_lru_ref()
1701 bkey_init(&last_flushed.k->k); in bch2_check_alloc_to_lru_refs()
1705 POS_MIN, BTREE_ITER_prefetch, k, in bch2_check_alloc_to_lru_refs()
1765 struct bkey_s_c k; in bch2_discard_one_bucket() local
1784 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, in bch2_discard_one_bucket()
1787 ret = bkey_err(k); in bch2_discard_one_bucket()
1791 a = bch2_alloc_to_v4_mut(trans, k); in bch2_discard_one_bucket()
1799 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) in bch2_discard_one_bucket()
1815 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) in bch2_discard_one_bucket()
1825 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) in bch2_discard_one_bucket()
1843 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1892 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1930 struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); in bch2_clear_bucket_needs_discard() local
1931 int ret = bkey_err(k); in bch2_clear_bucket_needs_discard()
1935 struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k); in bch2_clear_bucket_needs_discard()
2025 struct bpos bucket = u64_to_bucket(lru_k.k->p.offset); in invalidate_one_bucket()
2102 struct bkey_s_c k; in next_lru_key() local
2104 k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2105 if (!k.k && !*wrapped) { in next_lru_key()
2111 return k; in next_lru_key()
2138 struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); in bch2_do_invalidates_work() local
2139 ret = bkey_err(k); in bch2_do_invalidates_work()
2142 if (!k.k) in bch2_do_invalidates_work()
2145 ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate); in bch2_do_invalidates_work()
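
next_lru_key() (source line 2104) hides a wraparound: invalidation resumes scanning the LRU from wherever the cursor was left, and if nothing is found before the end it restarts once from the beginning, with the wrapped flag (line 2105) ensuring at most one full pass. The same control flow on an array; note the caller must consume (clear) each returned entry, just as invalidating a bucket removes it from the LRU:

        #include <stdio.h>
        #include <stdbool.h>

        /* find the next nonzero entry, restarting from 0 at most once */
        static int next_lru_entry(const int *lru, int n, int *pos, bool *wrapped)
        {
                for (;;) {
                        for (; *pos < n; (*pos)++)
                                if (lru[*pos])
                                        return *pos;

                        if (*wrapped)
                                return -1;      /* one full pass: nothing left */

                        *wrapped = true;
                        *pos = 0;               /* restart from the beginning */
                }
        }

        int main(void)
        {
                int lru[] = { 0, 5, 0, 9, 0 };
                int pos = 3;            /* resume where a previous scan stopped */
                bool wrapped = false;
                int i;

                while ((i = next_lru_entry(lru, 5, &pos, &wrapped)) >= 0) {
                        printf("invalidate entry %d (val %d)\n", i, lru[i]);
                        lru[i] = 0;     /* consuming removes it from the LRU */
                }
                return 0;
        }
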
2190 struct bkey_s_c k; in bch2_dev_freespace_init() local
2221 k = bch2_get_key_or_hole(&iter, end, &hole); in bch2_dev_freespace_init()
2222 ret = bkey_err(k); in bch2_dev_freespace_init()
2226 if (k.k->type) { in bch2_dev_freespace_init()
2232 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); in bch2_dev_freespace_init()
2234 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?: in bch2_dev_freespace_init()
2249 bkey_init(&freespace->k); in bch2_dev_freespace_init()
2250 freespace->k.type = KEY_TYPE_set; in bch2_dev_freespace_init()
2251 freespace->k.p = k.k->p; in bch2_dev_freespace_init()
2252 freespace->k.size = k.k->size; in bch2_dev_freespace_init()
2260 bch2_btree_iter_set_pos(&iter, k.k->p); in bch2_dev_freespace_init()
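
Finally, bch2_dev_freespace_init() walks the alloc btree with bch2_get_key_or_hole(): existing keys are indexed one at a time via bch2_bucket_do_index(), while each hole becomes a single KEY_TYPE_set extent sized to cover it (source lines 2249-2252) — far fewer keys than one per empty bucket. A sketch of that hole-coalescing walk, again over an illustrative array:

        #include <stdio.h>
        #include <stdbool.h>

        #define NBUCKETS 12

        int main(void)
        {
                /* stand-in for the alloc btree: which buckets have keys */
                bool has_key[NBUCKETS] = { [2] = true, [3] = true, [9] = true };
                unsigned pos = 0;

                while (pos < NBUCKETS) {
                        if (has_key[pos]) {
                                printf("bucket %u: index existing key\n", pos);
                                pos++;
                                continue;
                        }

                        /* coalesce the whole hole into one freespace extent */
                        unsigned end = pos + 1;

                        while (end < NBUCKETS && !has_key[end])
                                end++;
                        printf("insert one 'set' extent covering [%u, %u)\n",
                               pos, end);
                        pos = end;
                }
                return 0;
        }
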