Lines matching full:k in drivers/md/bcache/extents.c (each entry gives the source line number, the matching line, and the enclosing function)

33 i->k = bkey_next(i->k); in sort_key_next()
35 if (i->k == i->end) in sort_key_next()
43 int64_t c = bkey_cmp(_l->k, _r->k); in new_bch_key_sort_cmp()
45 return !(c ? c > 0 : _l->k < _r->k); in new_bch_key_sort_cmp()
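The comparator above orders merge-iterator entries by bkey_cmp() first and breaks ties on the keys' addresses; since a node's bsets appear to sit back-to-back in memory, the copy at the higher address is the newer one and sorts first, and the leading negation flips the result into the "less than" form the generic heap helpers presumably expect. A minimal user-space sketch of the same idiom (the types and names below are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entry { const uint64_t *key; };

/* Order by *key ascending; on a tie the entry whose key lives at the
 * higher address wins, so a newer copy of an equal key pops first. */
static bool entry_less(const struct entry *l, const struct entry *r)
{
	int64_t c = (*l->key > *r->key) - (*l->key < *r->key);

	return !(c ? c > 0 : (uintptr_t)l->key < (uintptr_t)r->key);
}

int main(void)
{
	uint64_t keys[2] = { 5, 5 };
	struct entry older = { &keys[0] }, newer = { &keys[1] };

	/* Equal keys: the higher-address copy sorts first. */
	printf("newer sorts first: %s\n",
	       entry_less(&newer, &older) ? "yes" : "no");
	return 0;
}
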
48 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid() argument
52 for (i = 0; i < KEY_PTRS(k); i++) in __ptr_invalid()
53 if (ptr_available(c, k, i)) { in __ptr_invalid()
55 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid()
56 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in __ptr_invalid()
58 if (KEY_SIZE(k) + r > c->cache->sb.bucket_size || in __ptr_invalid()
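The condition that starts at line 58 is the bucket-bounds test: a pointer is invalid when the extent would run past the end of the bucket it starts in, and the elided remainder of the condition (it does not mention k, so it is not matched here) bounds the bucket number against the device's first and last valid bucket. A self-contained sketch of the arithmetic, with illustrative parameter names:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* offset: the pointer's sector offset on the cache device,
 * size:   the extent length in sectors.
 * The extent must fit inside the bucket it starts in, and that bucket
 * must lie within [first_bucket, nbuckets). */
static bool ptr_out_of_bounds(uint64_t offset, uint64_t size,
			      uint64_t bucket_size,
			      uint64_t first_bucket, uint64_t nbuckets)
{
	uint64_t bucket = offset / bucket_size;	/* cf. PTR_BUCKET_NR()    */
	uint64_t r      = offset % bucket_size;	/* cf. bucket_remainder() */

	return size + r > bucket_size ||
	       bucket < first_bucket ||
	       bucket >= nbuckets;
}

int main(void)
{
	/* With 1024-sector buckets, a 128-sector extent at sector 4096 is
	 * fine; the same extent starting 64 sectors before the next bucket
	 * boundary would spill over and is rejected. */
	assert(!ptr_out_of_bounds(4096, 128, 1024, 1, 8192));
	assert(ptr_out_of_bounds(5120 - 64, 128, 1024, 1, 8192));
	return 0;
}
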
69 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status() argument
73 for (i = 0; i < KEY_PTRS(k); i++) in bch_ptr_status()
74 if (ptr_available(c, k, i)) { in bch_ptr_status()
76 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status()
77 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in bch_ptr_status()
79 if (KEY_SIZE(k) + r > c->cache->sb.bucket_size) in bch_ptr_status()
85 if (ptr_stale(c, k, i)) in bch_ptr_status()
89 if (!bkey_cmp(k, &ZERO_KEY)) in bch_ptr_status()
91 if (!KEY_PTRS(k)) in bch_ptr_status()
93 if (!KEY_SIZE(k)) in bch_ptr_status()
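bch_ptr_status() maps the failure modes visible above to short human-readable strings: an extent running past its bucket (line 79), a stale pointer (line 85), the all-zero key (line 89), a key with no pointers (line 91), and a zero-size key (line 93). The cache_bug() messages further down append this status to the textual key dump.
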
98 void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) in bch_extent_to_text() argument
105 p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k)); in bch_extent_to_text()
107 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_to_text()
111 if (PTR_DEV(k, i) == PTR_CHECK_DEV) in bch_extent_to_text()
114 p("%llu:%llu gen %llu", PTR_DEV(k, i), in bch_extent_to_text()
115 PTR_OFFSET(k, i), PTR_GEN(k, i)); in bch_extent_to_text()
120 if (KEY_DIRTY(k)) in bch_extent_to_text()
122 if (KEY_CSUM(k)) in bch_extent_to_text()
123 p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]); in bch_extent_to_text()
127 static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) in bch_bkey_dump() argument
133 bch_extent_to_text(buf, sizeof(buf), k); in bch_bkey_dump()
136 for (j = 0; j < KEY_PTRS(k); j++) { in bch_bkey_dump()
137 size_t n = PTR_BUCKET_NR(b->c, k, j); in bch_bkey_dump()
142 PTR_BUCKET(b->c, k, j)->prio); in bch_bkey_dump()
145 pr_cont(" %s\n", bch_ptr_status(b->c, k)); in bch_bkey_dump()
150 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid() argument
154 if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)) in __bch_btree_ptr_invalid()
157 if (__ptr_invalid(c, k)) in __bch_btree_ptr_invalid()
162 bch_extent_to_text(buf, sizeof(buf), k); in __bch_btree_ptr_invalid()
163 cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); in __bch_btree_ptr_invalid()
167 static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_invalid() argument
171 return __bch_btree_ptr_invalid(b->c, k); in bch_btree_ptr_invalid()
174 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive() argument
181 for (i = 0; i < KEY_PTRS(k); i++) in btree_ptr_bad_expensive()
182 if (ptr_available(b->c, k, i)) { in btree_ptr_bad_expensive()
183 g = PTR_BUCKET(b->c, k, i); in btree_ptr_bad_expensive()
185 if (KEY_DIRTY(k) || in btree_ptr_bad_expensive()
198 bch_extent_to_text(buf, sizeof(buf), k); in btree_ptr_bad_expensive()
201 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), in btree_ptr_bad_expensive()
206 static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) in bch_btree_ptr_bad() argument
211 if (!bkey_cmp(k, &ZERO_KEY) || in bch_btree_ptr_bad()
212 !KEY_PTRS(k) || in bch_btree_ptr_bad()
213 bch_ptr_invalid(bk, k)) in bch_btree_ptr_bad()
216 for (i = 0; i < KEY_PTRS(k); i++) in bch_btree_ptr_bad()
217 if (!ptr_available(b->c, k, i) || in bch_btree_ptr_bad()
218 ptr_stale(b->c, k, i)) in bch_btree_ptr_bad()
222 btree_ptr_bad_expensive(b, k)) in bch_btree_ptr_bad()
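For btree-node pointers the rules differ from extents in one visible respect: line 154 rejects any key that is marked dirty, has no pointers, or has no size, before applying the shared __ptr_invalid() bucket check. bch_btree_ptr_bad() then performs the cheap runtime checks first, the all-zero key, missing pointers, bch_ptr_invalid(), and unavailable or stale pointers (lines 211-218), before falling back to the costlier per-bucket inspection in btree_ptr_bad_expensive() (line 222).
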
264 int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k)); in new_bch_extent_sort_cmp()
266 return !(c ? c > 0 : _l->k < _r->k); in new_bch_extent_sort_cmp()
291 if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0) in bch_extent_sort_fixup()
294 if (!KEY_SIZE(i->k)) { in bch_extent_sort_fixup()
300 if (top->k > i->k) { in bch_extent_sort_fixup()
301 if (bkey_cmp(top->k, i->k) >= 0) in bch_extent_sort_fixup()
304 bch_cut_front(top->k, i->k); in bch_extent_sort_fixup()
309 BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); in bch_extent_sort_fixup()
311 if (bkey_cmp(i->k, top->k) < 0) { in bch_extent_sort_fixup()
312 bkey_copy(tmp, top->k); in bch_extent_sort_fixup()
314 bch_cut_back(&START_KEY(i->k), tmp); in bch_extent_sort_fixup()
315 bch_cut_front(i->k, top->k); in bch_extent_sort_fixup()
320 bch_cut_back(&START_KEY(i->k), top->k); in bch_extent_sort_fixup()
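Extents sort by START_KEY rather than by the key itself (line 264), so overlapping keys reach bch_extent_sort_fixup() in start-offset order; when the top two keys of the merge overlap, the address comparison at line 300 determines which copy is newer, and the older one is dropped, trimmed with bch_cut_front()/bch_cut_back(), or split. A small user-space model of that trimming, assuming the extent convention that a key records its end offset and size (start = offset - size); the types and helpers below are illustrative, not the kernel API:

#include <assert.h>
#include <stdint.h>

/* Simplified extent key: offset is the end sector, size the length,
 * so the key covers [offset - size, offset). */
struct ekey { uint64_t offset, size; };

#define EKEY_START(k) ((k)->offset - (k)->size)

/* Like bch_cut_front(): trim k so it starts where "where" ends
 * (the end offset stays put, only the size shrinks). */
static void cut_front(const struct ekey *where, struct ekey *k)
{
	if (where->offset > EKEY_START(k))
		k->size = where->offset < k->offset ?
			  k->offset - where->offset : 0;
}

/* Like bch_cut_back(): trim k so it ends at where's offset. */
static void cut_back(const struct ekey *where, struct ekey *k)
{
	uint64_t start = EKEY_START(k);

	if (where->offset < k->offset) {
		k->size   = where->offset > start ? where->offset - start : 0;
		k->offset = where->offset;
	}
}

int main(void)
{
	/* Older extent [0, 100), newer extent [60, 160): the newer key wins,
	 * so the older one is clipped back to [0, 60), as
	 * bch_cut_back(&START_KEY(i->k), top->k) does at line 320. */
	struct ekey older = { .offset = 100, .size = 100 };
	struct ekey newer = { .offset = 160, .size = 100 };
	struct ekey newer_start = { .offset = EKEY_START(&newer), .size = 0 };

	cut_back(&newer_start, &older);
	assert(older.offset == 60 && EKEY_START(&older) == 0);

	/* The opposite trim: older [100, 200) loses its front to newer
	 * [100, 150), as bch_cut_front(top->k, i->k) does at line 304. */
	struct ekey older2 = { .offset = 200, .size = 100 };
	struct ekey newer2 = { .offset = 150, .size = 50 };

	cut_front(&newer2, &older2);
	assert(older2.offset == 200 && older2.size == 50);	/* [150, 200) */
	return 0;
}
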
328 static void bch_subtract_dirty(struct bkey *k, in bch_subtract_dirty() argument
333 if (KEY_DIRTY(k)) in bch_subtract_dirty()
334 bcache_dev_sectors_dirty_add(c, KEY_INODE(k), in bch_subtract_dirty()
352 struct bkey *k = bch_btree_iter_next(iter); in bch_extent_insert_fixup() local
354 if (!k) in bch_extent_insert_fixup()
357 if (bkey_cmp(&START_KEY(k), insert) >= 0) { in bch_extent_insert_fixup()
358 if (KEY_SIZE(k)) in bch_extent_insert_fixup()
364 if (bkey_cmp(k, &START_KEY(insert)) <= 0) in bch_extent_insert_fixup()
367 old_offset = KEY_START(k); in bch_extent_insert_fixup()
368 old_size = KEY_SIZE(k); in bch_extent_insert_fixup()
378 if (replace_key && KEY_SIZE(k)) { in bch_extent_insert_fixup()
380 * k might have been split since we inserted/found the in bch_extent_insert_fixup()
384 uint64_t offset = KEY_START(k) - in bch_extent_insert_fixup()
388 if (KEY_START(k) < KEY_START(replace_key) || in bch_extent_insert_fixup()
389 KEY_OFFSET(k) > KEY_OFFSET(replace_key)) in bch_extent_insert_fixup()
393 if (KEY_START(k) > KEY_START(insert) + sectors_found) in bch_extent_insert_fixup()
396 if (!bch_bkey_equal_header(k, replace_key)) in bch_extent_insert_fixup()
405 if (k->ptr[i] != replace_key->ptr[i] + offset) in bch_extent_insert_fixup()
408 sectors_found = KEY_OFFSET(k) - KEY_START(insert); in bch_extent_insert_fixup()
411 if (bkey_cmp(insert, k) < 0 && in bch_extent_insert_fixup()
412 bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) { in bch_extent_insert_fixup()
422 bch_subtract_dirty(k, c, KEY_START(insert), in bch_extent_insert_fixup()
425 if (bkey_written(b, k)) { in bch_extent_insert_fixup()
440 bch_bset_insert(b, top, k); in bch_extent_insert_fixup()
443 bkey_copy(&temp.key, k); in bch_extent_insert_fixup()
444 bch_bset_insert(b, k, &temp.key); in bch_extent_insert_fixup()
445 top = bkey_next(k); in bch_extent_insert_fixup()
449 bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
450 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
454 if (bkey_cmp(insert, k) < 0) { in bch_extent_insert_fixup()
455 bch_cut_front(insert, k); in bch_extent_insert_fixup()
457 if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) in bch_extent_insert_fixup()
460 if (bkey_written(b, k) && in bch_extent_insert_fixup()
461 bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) { in bch_extent_insert_fixup()
466 bch_cut_front(k, k); in bch_extent_insert_fixup()
468 __bch_cut_back(&START_KEY(insert), k); in bch_extent_insert_fixup()
469 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
473 bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); in bch_extent_insert_fixup()
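The loop above visits every existing key that overlaps the one being inserted: lines 378-408 verify a compare-and-exchange style replace_key pointer by pointer, lines 411-450 split an old key that completely straddles the insert into a front and a back piece, and lines 454-469 trim partial overlaps, with the dirty-sector accounting adjusted at line 473. A compact sketch of the underlying interval arithmetic (the helper below is illustrative, not the kernel API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* How an existing extent [s, e) is affected by inserting [is, ie): the
 * overlapped middle is overwritten; any uncovered front or back of the
 * old extent survives, and when both survive the old key has to be
 * split, which is what lines 411-450 above do. */
static void overlap_fixup(uint64_t s, uint64_t e, uint64_t is, uint64_t ie)
{
	printf("old [%" PRIu64 ", %" PRIu64 ") vs insert [%" PRIu64 ", %" PRIu64 "): ",
	       s, e, is, ie);

	if (ie <= s || is >= e) {
		printf("no overlap, old key untouched\n");
		return;
	}
	if (s < is)
		printf("keep front [%" PRIu64 ", %" PRIu64 ") ", s, is);
	if (e > ie)
		printf("keep back [%" PRIu64 ", %" PRIu64 ") ", ie, e);
	if (s < is && e > ie)
		printf("(split required)");
	else if (s >= is && e <= ie)
		printf("old key completely overwritten");
	printf("\n");
}

int main(void)
{
	overlap_fixup(0, 100, 20, 60);	/* middle overlap: split the old key     */
	overlap_fixup(0, 100, 50, 150);	/* tail overlap: only the front survives */
	overlap_fixup(0, 100, 0, 100);	/* full overwrite                         */
	return 0;
}
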
495 bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid() argument
499 if (!KEY_SIZE(k)) in __bch_extent_invalid()
502 if (KEY_SIZE(k) > KEY_OFFSET(k)) in __bch_extent_invalid()
505 if (__ptr_invalid(c, k)) in __bch_extent_invalid()
510 bch_extent_to_text(buf, sizeof(buf), k); in __bch_extent_invalid()
511 cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); in __bch_extent_invalid()
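An extent is invalid when it has zero size (line 499), when its size exceeds its offset (line 502: the offset field stores the extent's end and the start is offset minus size, so a larger size would wrap below zero), or when any pointer fails the shared bucket check in __ptr_invalid() (line 505); the cache_bug() at line 511 then reports it using the text dump and status helpers above.
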
515 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) in bch_extent_invalid() argument
519 return __bch_extent_invalid(b->c, k); in bch_extent_invalid()
522 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive() argument
525 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
532 (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k)))) in bch_extent_bad_expensive()
544 bch_extent_to_text(buf, sizeof(buf), k); in bch_extent_bad_expensive()
547 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), in bch_extent_bad_expensive()
552 static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) in bch_extent_bad() argument
558 if (!KEY_PTRS(k) || in bch_extent_bad()
559 bch_extent_invalid(bk, k)) in bch_extent_bad()
562 for (i = 0; i < KEY_PTRS(k); i++) in bch_extent_bad()
563 if (!ptr_available(b->c, k, i)) in bch_extent_bad()
566 for (i = 0; i < KEY_PTRS(k); i++) { in bch_extent_bad()
567 stale = ptr_stale(b->c, k, i); in bch_extent_bad()
569 if (stale && KEY_DIRTY(k)) { in bch_extent_bad()
570 bch_extent_to_text(buf, sizeof(buf), k); in bch_extent_bad()
583 bch_extent_bad_expensive(b, k, i)) in bch_extent_bad()
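bch_extent_bad() decides whether a key should simply be ignored: it is bad when it has no pointers or fails bch_extent_invalid() (lines 558-559), when any pointer is unavailable (line 563), or when a pointer is stale, with a stale pointer on a dirty key additionally logged through the buffer filled at line 570; the costlier per-bucket verification in bch_extent_bad_expensive() is left for last (line 583).
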