1 // SPDX-License-Identifier: GPL-2.0
23 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++; \
37 if (!c->btree_roots_known[0].b) in bch2_recalc_btree_reserve()
43 if (r->b) in bch2_recalc_btree_reserve()
44 reserve += min_t(unsigned, 1, r->b->c.level) * 8; in bch2_recalc_btree_reserve()
47 c->btree_cache.nr_reserve = reserve; in bch2_recalc_btree_reserve()
52 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in btree_cache_can_free()
54 size_t can_free = list->nr; in btree_cache_can_free()
55 if (!list->idx) in btree_cache_can_free()
56 can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve); in btree_cache_can_free()
60 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
62 BUG_ON(!list_empty(&b->list)); in btree_node_to_freedlist()
64 if (b->c.lock.readers) in btree_node_to_freedlist()
65 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
67 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
70 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
72 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_to_freelist()
73 BUG_ON(!b->data); in __bch2_btree_node_to_freelist()
75 bc->nr_freeable++; in __bch2_btree_node_to_freelist()
76 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
79 void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) in bch2_btree_node_to_freelist() argument
81 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_to_freelist()
83 mutex_lock(&bc->lock); in bch2_btree_node_to_freelist()
84 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
85 mutex_unlock(&bc->lock); in bch2_btree_node_to_freelist()
87 six_unlock_write(&b->c.lock); in bch2_btree_node_to_freelist()
88 six_unlock_intent(&b->c.lock); in bch2_btree_node_to_freelist()
91 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free() argument
93 BUG_ON(!list_empty(&b->list)); in __btree_node_data_free()
94 BUG_ON(btree_node_hashed(b)); in __btree_node_data_free()
101 if (b->data) in __btree_node_data_free()
102 mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
103 if (b->aux_data) in __btree_node_data_free()
104 mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
106 EBUG_ON(btree_node_write_in_flight(b)); in __btree_node_data_free()
108 clear_btree_node_just_written(b); in __btree_node_data_free()
110 kvfree(b->data); in __btree_node_data_free()
111 b->data = NULL; in __btree_node_data_free()
113 kvfree(b->aux_data); in __btree_node_data_free()
115 munmap(b->aux_data, btree_aux_data_bytes(b)); in __btree_node_data_free()
117 b->aux_data = NULL; in __btree_node_data_free()
119 btree_node_to_freedlist(bc, b); in __btree_node_data_free()
122 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
124 BUG_ON(list_empty(&b->list)); in btree_node_data_free()
125 list_del_init(&b->list); in btree_node_data_free()
126 --bc->nr_freeable; in btree_node_data_free()
127 __btree_node_data_free(bc, b); in btree_node_data_free()
133 const struct btree *b = obj; in bch2_btree_cache_cmp_fn() local
134 const u64 *v = arg->key; in bch2_btree_cache_cmp_fn()
136 return b->hash_val == *v ? 0 : 1; in bch2_btree_cache_cmp_fn()
147 static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) in btree_node_data_alloc() argument
149 BUG_ON(b->data || b->aux_data); in btree_node_data_alloc()
153 b->data = kvmalloc(btree_buf_bytes(b), gfp); in btree_node_data_alloc()
154 if (!b->data) in btree_node_data_alloc()
155 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; in btree_node_data_alloc()
157 b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp); in btree_node_data_alloc()
159 b->aux_data = mmap(NULL, btree_aux_data_bytes(b), in btree_node_data_alloc()
162 if (b->aux_data == MAP_FAILED) in btree_node_data_alloc()
163 b->aux_data = NULL; in btree_node_data_alloc()
165 if (!b->aux_data) { in btree_node_data_alloc()
166 kvfree(b->data); in btree_node_data_alloc()
167 b->data = NULL; in btree_node_data_alloc()
168 return -BCH_ERR_ENOMEM_btree_node_mem_alloc; in btree_node_data_alloc()
176 struct btree *b; in __btree_node_mem_alloc() local
178 b = kzalloc(sizeof(struct btree), gfp); in __btree_node_mem_alloc()
179 if (!b) in __btree_node_mem_alloc()
182 bkey_btree_ptr_init(&b->key); in __btree_node_mem_alloc()
183 INIT_LIST_HEAD(&b->list); in __btree_node_mem_alloc()
184 INIT_LIST_HEAD(&b->write_blocked); in __btree_node_mem_alloc()
185 b->byte_order = ilog2(c->opts.btree_node_size); in __btree_node_mem_alloc()
186 return b; in __btree_node_mem_alloc()
191 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_mem_alloc()
192 struct btree *b; in __bch2_btree_node_mem_alloc() local
194 b = __btree_node_mem_alloc(c, GFP_KERNEL); in __bch2_btree_node_mem_alloc()
195 if (!b) in __bch2_btree_node_mem_alloc()
198 if (btree_node_data_alloc(c, b, GFP_KERNEL)) { in __bch2_btree_node_mem_alloc()
199 kfree(b); in __bch2_btree_node_mem_alloc()
203 bch2_btree_lock_init(&b->c, 0); in __bch2_btree_node_mem_alloc()
205 __bch2_btree_node_to_freelist(bc, b); in __bch2_btree_node_mem_alloc()
206 return b; in __bch2_btree_node_mem_alloc()
209 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
211 struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); in __btree_node_pinned()
213 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
215 return ((mask & BIT_ULL(b->c.btree_id)) && in __btree_node_pinned()
216 bbpos_cmp(bc->pinned_nodes_start, pos) < 0 && in __btree_node_pinned()
217 bbpos_cmp(bc->pinned_nodes_end, pos) >= 0); in __btree_node_pinned()
220 void bch2_node_pin(struct bch_fs *c, struct btree *b) in bch2_node_pin() argument
222 struct btree_cache *bc = &c->btree_cache; in bch2_node_pin()
224 mutex_lock(&bc->lock); in bch2_node_pin()
225 BUG_ON(!__btree_node_pinned(bc, b)); in bch2_node_pin()
226 if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { in bch2_node_pin()
227 set_btree_node_pinned(b); in bch2_node_pin()
228 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
229 bc->live[0].nr--; in bch2_node_pin()
230 bc->live[1].nr++; in bch2_node_pin()
232 mutex_unlock(&bc->lock); in bch2_node_pin()
237 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_unpin()
238 struct btree *b, *n; in bch2_btree_cache_unpin() local
240 mutex_lock(&bc->lock); in bch2_btree_cache_unpin()
241 c->btree_cache.pinned_nodes_mask[0] = 0; in bch2_btree_cache_unpin()
242 c->btree_cache.pinned_nodes_mask[1] = 0; in bch2_btree_cache_unpin()
244 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
245 clear_btree_node_pinned(b); in bch2_btree_cache_unpin()
246 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
247 bc->live[0].nr++; in bch2_btree_cache_unpin()
248 bc->live[1].nr--; in bch2_btree_cache_unpin()
251 mutex_unlock(&bc->lock); in bch2_btree_cache_unpin()
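
bch2_node_pin() and bch2_btree_cache_unpin() above amount to moving a node between two LRU lists (live[0] for ordinary nodes, live[1] for pinned ones) while keeping the per-list counts in step. A minimal userspace sketch of that bookkeeping, with invented names and a hand-rolled list in place of the kernel's list.h (the root-node check in bch2_node_pin() is omitted):

/* Illustrative model only, not kernel code: pinning is a list move plus
 * counter updates, mirroring bch2_node_pin() above. */
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	bool pinned;
};

struct cache_list {
	struct node head;		/* circular list head */
	unsigned long nr;
};

struct cache {
	struct cache_list live[2];	/* [0] = ordinary LRU, [1] = pinned */
};

static void cache_list_init(struct cache_list *l)
{
	l->head.prev = l->head.next = &l->head;
	l->nr = 0;
}

static void cache_list_add_tail(struct cache_list *l, struct node *n)
{
	n->prev = l->head.prev;
	n->next = &l->head;
	l->head.prev->next = n;
	l->head.prev = n;
	l->nr++;
}

static void cache_list_del(struct cache_list *l, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	l->nr--;
}

static void node_pin(struct cache *c, struct node *n)
{
	if (n->pinned)
		return;
	n->pinned = true;
	cache_list_del(&c->live[0], n);
	cache_list_add_tail(&c->live[1], n);
}

int main(void)
{
	struct cache c;
	struct node n = { .pinned = false };

	cache_list_init(&c.live[0]);
	cache_list_init(&c.live[1]);
	cache_list_add_tail(&c.live[0], &n);

	node_pin(&c, &n);
	printf("live=%lu pinned=%lu\n", c.live[0].nr, c.live[1].nr);
	return 0;
}
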
254 /* Btree in memory cache - hash table */
256 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
258 lockdep_assert_held(&bc->lock); in __bch2_btree_node_hash_remove()
260 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
264 b->hash_val = 0; in __bch2_btree_node_hash_remove()
266 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_remove()
267 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
268 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
269 list_del_init(&b->list); in __bch2_btree_node_hash_remove()
272 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
274 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
275 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
278 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
280 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_hash_insert()
281 BUG_ON(b->hash_val); in __bch2_btree_node_hash_insert()
283 b->hash_val = btree_ptr_hash_val(&b->key); in __bch2_btree_node_hash_insert()
284 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
289 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_insert()
290 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
292 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
293 mod_bit(BTREE_NODE_pinned, &b->flags, p); in __bch2_btree_node_hash_insert()
295 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
296 bc->live[p].nr++; in __bch2_btree_node_hash_insert()
300 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
303 b->c.level = level; in bch2_btree_node_hash_insert()
304 b->c.btree_id = id; in bch2_btree_node_hash_insert()
306 mutex_lock(&bc->lock); in bch2_btree_node_hash_insert()
307 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
308 mutex_unlock(&bc->lock); in bch2_btree_node_hash_insert()
317 struct bch_fs *c = trans->c; in bch2_btree_node_update_key_early()
318 struct btree *b; in bch2_btree_node_update_key_early() local
325 b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true); in bch2_btree_node_update_key_early()
326 if (!IS_ERR_OR_NULL(b)) { in bch2_btree_node_update_key_early()
327 mutex_lock(&c->btree_cache.lock); in bch2_btree_node_update_key_early()
329 bch2_btree_node_hash_remove(&c->btree_cache, b); in bch2_btree_node_update_key_early()
331 bkey_copy(&b->key, new); in bch2_btree_node_update_key_early()
332 ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); in bch2_btree_node_update_key_early()
335 mutex_unlock(&c->btree_cache.lock); in bch2_btree_node_update_key_early()
336 six_unlock_read(&b->c.lock); in bch2_btree_node_update_key_early()
348 return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); in btree_cache_find()
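
btree_cache_find() resolves a node by nothing more than the 64-bit hash of its key, and bch2_btree_cache_cmp_fn() above treats two entries as equal when their hash_val fields match. A rough, self-contained model of that lookup-by-precomputed-hash pattern, using a fixed bucket array instead of the kernel's resizable rhashtable (all names here are invented):

/* Sketch in the spirit of bch2_btree_cache_cmp_fn()/btree_cache_find();
 * not the kernel rhashtable API. */
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 64

struct cached_node {
	uint64_t hash_val;		/* precomputed hash of the node's key */
	struct cached_node *next;	/* bucket chain */
};

struct node_table {
	struct cached_node *buckets[NR_BUCKETS];
};

static void table_insert(struct node_table *t, struct cached_node *n)
{
	struct cached_node **b = &t->buckets[n->hash_val % NR_BUCKETS];

	n->next = *b;
	*b = n;
}

static struct cached_node *table_find(struct node_table *t, uint64_t hash_val)
{
	struct cached_node *n = t->buckets[hash_val % NR_BUCKETS];

	/* equality is decided by the stored hash alone, as in the cmp_fn above */
	for (; n; n = n->next)
		if (n->hash_val == hash_val)
			return n;
	return NULL;
}

int main(void)
{
	struct node_table t = { 0 };
	struct cached_node n = { .hash_val = 0x1234 };

	table_insert(&t, &n);
	printf("found: %d\n", table_find(&t, 0x1234) != NULL);
	return 0;
}
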
355 static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter) in __btree_node_reclaim() argument
357 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim()
360 lockdep_assert_held(&bc->lock); in __btree_node_reclaim()
362 if (b->flags & ((1U << BTREE_NODE_dirty)| in __btree_node_reclaim()
366 if (btree_node_dirty(b)) in __btree_node_reclaim()
368 else if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
370 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
372 return -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
376 bch2_btree_node_wait_on_read(b); in __btree_node_reclaim()
377 bch2_btree_node_wait_on_write(b); in __btree_node_reclaim()
380 if (!six_trylock_intent(&b->c.lock)) { in __btree_node_reclaim()
382 return -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
385 if (!six_trylock_write(&b->c.lock)) { in __btree_node_reclaim()
391 if (b->flags & ((1U << BTREE_NODE_read_in_flight)| in __btree_node_reclaim()
394 if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
396 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
400 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
401 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
405 if (btree_node_noevict(b)) { in __btree_node_reclaim()
409 if (btree_node_write_blocked(b)) { in __btree_node_reclaim()
413 if (btree_node_will_make_reachable(b)) { in __btree_node_reclaim()
418 if (btree_node_dirty(b)) { in __btree_node_reclaim()
426 * - unless btree verify mode is enabled, since it runs out of in __btree_node_reclaim()
430 bch2_btree_node_write(c, b, SIX_LOCK_intent, in __btree_node_reclaim()
433 __bch2_btree_node_write(c, b, in __btree_node_reclaim()
436 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
437 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
441 if (b->hash_val && !ret) in __btree_node_reclaim()
442 trace_and_count(c, btree_cache_reap, c, b); in __btree_node_reclaim()
445 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
447 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
448 ret = -BCH_ERR_ENOMEM_btree_node_reclaim; in __btree_node_reclaim()
452 static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter) in btree_node_reclaim() argument
454 return __btree_node_reclaim(c, b, false, shrinker_counter); in btree_node_reclaim()
457 static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b) in btree_node_write_and_reclaim() argument
459 return __btree_node_reclaim(c, b, true, false); in btree_node_write_and_reclaim()
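
__btree_node_reclaim() only tears a node down if both of its locks can be taken without blocking and none of the dirty/in-flight flags are set, re-checking the flags once the locks are held. A loose pthread-based model of that rule (invented names; unlike the kernel code, which returns with the locks still held on success, this sketch drops them before returning):

/* Rough model of the reclaim rule: free only what can be locked without
 * waiting and is not busy. pthread mutexes stand in for the six locks. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	pthread_mutex_t intent_lock;
	pthread_mutex_t write_lock;
	bool dirty, read_in_flight, write_in_flight;
};

/* returns true if the node may be reclaimed (caller then frees its buffers) */
static bool node_try_reclaim(struct node *n)
{
	bool ret = false;

	if (n->dirty || n->read_in_flight || n->write_in_flight)
		return false;

	if (pthread_mutex_trylock(&n->intent_lock))
		return false;
	if (pthread_mutex_trylock(&n->write_lock))
		goto out_intent;

	/* re-check under the locks, as the kernel code does */
	ret = !n->dirty && !n->read_in_flight && !n->write_in_flight;

	pthread_mutex_unlock(&n->write_lock);
out_intent:
	pthread_mutex_unlock(&n->intent_lock);
	return ret;
}

int main(void)
{
	struct node n = {
		.intent_lock = PTHREAD_MUTEX_INITIALIZER,
		.write_lock  = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("reclaimed: %d\n", node_try_reclaim(&n));
	return 0;
}
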
465 struct btree_cache_list *list = shrink->private_data; in bch2_btree_cache_scan()
466 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in bch2_btree_cache_scan()
468 struct btree *b, *t; in bch2_btree_cache_scan() local
469 unsigned long nr = sc->nr_to_scan; in bch2_btree_cache_scan()
475 bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; in bch2_btree_cache_scan()
480 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
484 * It's _really_ critical that we don't free too many btree nodes - we in bch2_btree_cache_scan()
494 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
507 if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
508 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
509 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
510 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
512 bc->nr_freed++; in bch2_btree_cache_scan()
516 list_for_each_entry_safe(b, t, &list->list, list) { in bch2_btree_cache_scan()
519 if (btree_node_accessed(b)) { in bch2_btree_cache_scan()
520 clear_btree_node_accessed(b); in bch2_btree_cache_scan()
521 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; in bch2_btree_cache_scan()
522 --touched; in bch2_btree_cache_scan()
523 } else if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
524 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
525 __btree_node_data_free(bc, b); in bch2_btree_cache_scan()
528 bc->nr_freed++; in bch2_btree_cache_scan()
530 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
531 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
536 btree_node_dirty(b) && in bch2_btree_cache_scan()
537 !btree_node_will_make_reachable(b) && in bch2_btree_cache_scan()
538 !btree_node_write_blocked(b) && in bch2_btree_cache_scan()
539 six_trylock_read(&b->c.lock)) { in bch2_btree_cache_scan()
540 list_move(&list->list, &b->list); in bch2_btree_cache_scan()
541 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
542 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_cache_scan()
543 six_unlock_read(&b->c.lock); in bch2_btree_cache_scan()
546 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
554 if (&t->list != &list->list) in bch2_btree_cache_scan()
555 list_move_tail(&list->list, &t->list); in bch2_btree_cache_scan()
557 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
561 trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret); in bch2_btree_cache_scan()
568 struct btree_cache_list *list = shrink->private_data; in bch2_btree_cache_count()
578 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_exit()
579 struct btree *b, *t; in bch2_fs_btree_cache_exit() local
582 shrinker_free(bc->live[1].shrink); in bch2_fs_btree_cache_exit()
583 shrinker_free(bc->live[0].shrink); in bch2_fs_btree_cache_exit()
587 mutex_lock(&bc->lock); in bch2_fs_btree_cache_exit()
589 if (c->verify_data) in bch2_fs_btree_cache_exit()
590 list_move(&c->verify_data->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
592 kvfree(c->verify_ondisk); in bch2_fs_btree_cache_exit()
597 if (r->b) in bch2_fs_btree_cache_exit()
598 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
601 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
602 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
604 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
606 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
607 BUG_ON(btree_node_read_in_flight(b) || in bch2_fs_btree_cache_exit()
608 btree_node_write_in_flight(b)); in bch2_fs_btree_cache_exit()
610 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
613 BUG_ON(!bch2_journal_error(&c->journal) && in bch2_fs_btree_cache_exit()
614 atomic_long_read(&c->btree_cache.nr_dirty)); in bch2_fs_btree_cache_exit()
616 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); in bch2_fs_btree_cache_exit()
618 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
619 list_del(&b->list); in bch2_fs_btree_cache_exit()
620 six_lock_exit(&b->c.lock); in bch2_fs_btree_cache_exit()
621 kfree(b); in bch2_fs_btree_cache_exit()
624 mutex_unlock(&bc->lock); in bch2_fs_btree_cache_exit()
627 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_fs_btree_cache_exit()
628 BUG_ON(bc->nr_by_btree[i]); in bch2_fs_btree_cache_exit()
629 BUG_ON(bc->live[0].nr); in bch2_fs_btree_cache_exit()
630 BUG_ON(bc->live[1].nr); in bch2_fs_btree_cache_exit()
631 BUG_ON(bc->nr_freeable); in bch2_fs_btree_cache_exit()
633 if (bc->table_init_done) in bch2_fs_btree_cache_exit()
634 rhashtable_destroy(&bc->table); in bch2_fs_btree_cache_exit()
639 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_init()
644 ret = rhashtable_init(&bc->table, &bch_btree_cache_params); in bch2_fs_btree_cache_init()
648 bc->table_init_done = true; in bch2_fs_btree_cache_init()
652 for (i = 0; i < bc->nr_reserve; i++) in bch2_fs_btree_cache_init()
656 list_splice_init(&bc->live[0].list, &bc->freeable); in bch2_fs_btree_cache_init()
658 mutex_init(&c->verify_lock); in bch2_fs_btree_cache_init()
660 shrink = shrinker_alloc(0, "%s-btree_cache", c->name); in bch2_fs_btree_cache_init()
663 bc->live[0].shrink = shrink; in bch2_fs_btree_cache_init()
664 shrink->count_objects = bch2_btree_cache_count; in bch2_fs_btree_cache_init()
665 shrink->scan_objects = bch2_btree_cache_scan; in bch2_fs_btree_cache_init()
666 shrink->seeks = 2; in bch2_fs_btree_cache_init()
667 shrink->private_data = &bc->live[0]; in bch2_fs_btree_cache_init()
670 shrink = shrinker_alloc(0, "%s-btree_cache-pinned", c->name); in bch2_fs_btree_cache_init()
673 bc->live[1].shrink = shrink; in bch2_fs_btree_cache_init()
674 shrink->count_objects = bch2_btree_cache_count; in bch2_fs_btree_cache_init()
675 shrink->scan_objects = bch2_btree_cache_scan; in bch2_fs_btree_cache_init()
676 shrink->seeks = 8; in bch2_fs_btree_cache_init()
677 shrink->private_data = &bc->live[1]; in bch2_fs_btree_cache_init()
682 return -BCH_ERR_ENOMEM_fs_btree_cache_init; in bch2_fs_btree_cache_init()
687 mutex_init(&bc->lock); in bch2_fs_btree_cache_init_early()
688 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) { in bch2_fs_btree_cache_init_early()
689 bc->live[i].idx = i; in bch2_fs_btree_cache_init_early()
690 INIT_LIST_HEAD(&bc->live[i].list); in bch2_fs_btree_cache_init_early()
692 INIT_LIST_HEAD(&bc->freeable); in bch2_fs_btree_cache_init_early()
693 INIT_LIST_HEAD(&bc->freed_pcpu); in bch2_fs_btree_cache_init_early()
694 INIT_LIST_HEAD(&bc->freed_nonpcpu); in bch2_fs_btree_cache_init_early()
705 struct bch_fs *c = trans->c; in bch2_btree_cache_cannibalize_unlock()
706 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_unlock()
708 if (bc->alloc_lock == current) { in bch2_btree_cache_cannibalize_unlock()
710 bc->alloc_lock = NULL; in bch2_btree_cache_cannibalize_unlock()
711 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_unlock()
717 struct bch_fs *c = trans->c; in bch2_btree_cache_cannibalize_lock()
718 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_lock()
722 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) in bch2_btree_cache_cannibalize_lock()
727 return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock; in bch2_btree_cache_cannibalize_lock()
730 closure_wait(&bc->alloc_wait, cl); in bch2_btree_cache_cannibalize_lock()
734 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) { in bch2_btree_cache_cannibalize_lock()
736 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_lock()
741 return -BCH_ERR_btree_cache_cannibalize_lock_blocked; in bch2_btree_cache_cannibalize_lock()
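
The cannibalize lock is claimed with try_cmpxchg() on a task pointer: NULL means the lock is free, and the current owner is allowed to take it again. A small C11 sketch of that claim/release path, leaving out the closure waitlist (names invented):

/* C11 model of the try_cmpxchg()-based single-owner lock used for
 * cannibalization; the kernel version also parks waiters on a closure. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { int id; };		/* stand-in for struct task_struct */

static _Atomic(struct task *) alloc_lock;

static bool cannibalize_trylock(struct task *current_task)
{
	struct task *old = NULL;

	/* succeed if the lock was free, or if we already hold it */
	return atomic_compare_exchange_strong(&alloc_lock, &old, current_task) ||
	       old == current_task;
}

static void cannibalize_unlock(struct task *current_task)
{
	if (atomic_load(&alloc_lock) == current_task)
		atomic_store(&alloc_lock, NULL);
}

int main(void)
{
	struct task a = { 1 }, b = { 2 };

	printf("a: %d\n", cannibalize_trylock(&a));	/* 1: lock was free */
	printf("b: %d\n", cannibalize_trylock(&b));	/* 0: held by a */
	printf("a again: %d\n", cannibalize_trylock(&a)); /* 1: owner may re-take */
	cannibalize_unlock(&a);
	return 0;
}
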
750 struct btree_cache *bc = &c->btree_cache; in btree_node_cannibalize()
751 struct btree *b; in btree_node_cannibalize() local
753 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
754 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
755 if (!btree_node_reclaim(c, b, false)) in btree_node_cannibalize()
756 return b; in btree_node_cannibalize()
759 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
760 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
761 if (!btree_node_write_and_reclaim(c, b)) in btree_node_cannibalize()
762 return b; in btree_node_cannibalize()
765 * Rare case: all nodes were intent-locked. in btree_node_cannibalize()
766 * Just busy-wait. in btree_node_cannibalize()
775 struct bch_fs *c = trans->c; in bch2_btree_node_mem_alloc()
776 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_mem_alloc()
778 ? &bc->freed_pcpu in bch2_btree_node_mem_alloc()
779 : &bc->freed_nonpcpu; in bch2_btree_node_mem_alloc()
780 struct btree *b, *b2; in bch2_btree_node_mem_alloc() local
783 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
789 list_for_each_entry(b, freed, list) in bch2_btree_node_mem_alloc()
790 if (!btree_node_reclaim(c, b, false)) { in bch2_btree_node_mem_alloc()
791 list_del_init(&b->list); in bch2_btree_node_mem_alloc()
795 b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); in bch2_btree_node_mem_alloc()
796 if (!b) { in bch2_btree_node_mem_alloc()
797 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
799 b = __btree_node_mem_alloc(c, GFP_KERNEL); in bch2_btree_node_mem_alloc()
800 if (!b) in bch2_btree_node_mem_alloc()
802 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
805 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0); in bch2_btree_node_mem_alloc()
807 BUG_ON(!six_trylock_intent(&b->c.lock)); in bch2_btree_node_mem_alloc()
808 BUG_ON(!six_trylock_write(&b->c.lock)); in bch2_btree_node_mem_alloc()
815 list_for_each_entry(b2, &bc->freeable, list) in bch2_btree_node_mem_alloc()
817 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
818 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
820 list_del_init(&b2->list); in bch2_btree_node_mem_alloc()
821 --bc->nr_freeable; in bch2_btree_node_mem_alloc()
823 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
825 six_unlock_write(&b2->c.lock); in bch2_btree_node_mem_alloc()
826 six_unlock_intent(&b2->c.lock); in bch2_btree_node_mem_alloc()
830 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
832 if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { in bch2_btree_node_mem_alloc()
834 if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) in bch2_btree_node_mem_alloc()
839 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
840 BUG_ON(btree_node_hashed(b)); in bch2_btree_node_mem_alloc()
841 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_mem_alloc()
842 BUG_ON(btree_node_write_in_flight(b)); in bch2_btree_node_mem_alloc()
844 b->flags = 0; in bch2_btree_node_mem_alloc()
845 b->written = 0; in bch2_btree_node_mem_alloc()
846 b->nsets = 0; in bch2_btree_node_mem_alloc()
847 b->sib_u64s[0] = 0; in bch2_btree_node_mem_alloc()
848 b->sib_u64s[1] = 0; in bch2_btree_node_mem_alloc()
849 b->whiteout_u64s = 0; in bch2_btree_node_mem_alloc()
850 bch2_btree_keys_init(b); in bch2_btree_node_mem_alloc()
851 set_btree_node_accessed(b); in bch2_btree_node_mem_alloc()
853 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc], in bch2_btree_node_mem_alloc()
858 bch2_btree_node_to_freelist(c, b); in bch2_btree_node_mem_alloc()
862 return b; in bch2_btree_node_mem_alloc()
864 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
867 if (bc->alloc_lock == current) { in bch2_btree_node_mem_alloc()
872 if (b) { in bch2_btree_node_mem_alloc()
873 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
874 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
876 six_unlock_write(&b2->c.lock); in bch2_btree_node_mem_alloc()
877 six_unlock_intent(&b2->c.lock); in bch2_btree_node_mem_alloc()
879 b = b2; in bch2_btree_node_mem_alloc()
882 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
883 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
889 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
890 return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc); in bch2_btree_node_mem_alloc()
902 struct bch_fs *c = trans->c; in bch2_btree_node_fill()
903 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_fill()
904 struct btree *b; in bch2_btree_node_fill() local
912 if (unlikely(!bkey_is_btree_ptr(&k->k))) { in bch2_btree_node_fill()
916 …int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf); in bch2_btree_node_fill()
921 if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) { in bch2_btree_node_fill()
939 b = bch2_btree_node_mem_alloc(trans, level != 0); in bch2_btree_node_fill()
941 if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) { in bch2_btree_node_fill()
943 return b; in bch2_btree_node_fill()
945 trans->memory_allocation_failure = true; in bch2_btree_node_fill()
950 if (IS_ERR(b)) in bch2_btree_node_fill()
951 return b; in bch2_btree_node_fill()
953 bkey_copy(&b->key, k); in bch2_btree_node_fill()
954 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
958 b->hash_val = 0; in bch2_btree_node_fill()
960 mutex_lock(&bc->lock); in bch2_btree_node_fill()
961 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
962 mutex_unlock(&bc->lock); in bch2_btree_node_fill()
964 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
965 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
969 set_btree_node_read_in_flight(b); in bch2_btree_node_fill()
970 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
973 u32 seq = six_lock_seq(&b->c.lock); in bch2_btree_node_fill()
976 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
979 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
988 if (!six_relock_type(&b->c.lock, lock_type, seq)) in bch2_btree_node_fill()
989 b = NULL; in bch2_btree_node_fill()
991 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
993 six_lock_downgrade(&b->c.lock); in bch2_btree_node_fill()
996 return b; in bch2_btree_node_fill()
999 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) in btree_bad_header() argument
1003 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) in btree_bad_header()
1010 bch2_btree_id_str(b->c.btree_id), b->c.level); in btree_bad_header()
1011 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); in btree_bad_header()
1015 bch2_btree_id_str(BTREE_NODE_ID(b->data)), in btree_bad_header()
1016 BTREE_NODE_LEVEL(b->data)); in btree_bad_header()
1017 bch2_bpos_to_text(&buf, b->data->min_key); in btree_bad_header()
1020 bch2_bpos_to_text(&buf, b->data->max_key); in btree_bad_header()
1027 static inline void btree_check_header(struct bch_fs *c, struct btree *b) in btree_check_header() argument
1029 if (b->c.btree_id != BTREE_NODE_ID(b->data) || in btree_check_header()
1030 b->c.level != BTREE_NODE_LEVEL(b->data) || in btree_check_header()
1031 !bpos_eq(b->data->max_key, b->key.k.p) || in btree_check_header()
1032 (b->key.k.type == KEY_TYPE_btree_ptr_v2 && in btree_check_header()
1033 !bpos_eq(b->data->min_key, in btree_check_header()
1034 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) in btree_check_header()
1035 btree_bad_header(c, b); in btree_check_header()
1043 struct bch_fs *c = trans->c; in __bch2_btree_node_get()
1044 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_get()
1045 struct btree *b; in __bch2_btree_node_get() local
1051 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1052 if (unlikely(!b)) { in __bch2_btree_node_get()
1058 b = bch2_btree_node_fill(trans, path, k, path->btree_id, in __bch2_btree_node_get()
1063 if (!b) in __bch2_btree_node_get()
1066 if (IS_ERR(b)) in __bch2_btree_node_get()
1067 return b; in __bch2_btree_node_get()
1072 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in __bch2_btree_node_get()
1078 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in __bch2_btree_node_get()
1079 b->c.level != level || in __bch2_btree_node_get()
1081 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1090 if (!btree_node_accessed(b)) in __bch2_btree_node_get()
1091 set_btree_node_accessed(b); in __bch2_btree_node_get()
1094 if (unlikely(btree_node_read_in_flight(b))) { in __bch2_btree_node_get()
1095 u32 seq = six_lock_seq(&b->c.lock); in __bch2_btree_node_get()
1097 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1101 bch2_btree_node_wait_on_read(b); in __bch2_btree_node_get()
1111 if (!six_relock_type(&b->c.lock, lock_type, seq)) in __bch2_btree_node_get()
1119 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1124 prefetch(b->aux_data); in __bch2_btree_node_get()
1126 for_each_bset(b, t) { in __bch2_btree_node_get()
1127 void *p = (u64 *) b->aux_data + t->aux_data_offset; in __bch2_btree_node_get()
1134 if (unlikely(btree_node_read_error(b))) { in __bch2_btree_node_get()
1135 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1136 return ERR_PTR(-BCH_ERR_btree_node_read_error); in __bch2_btree_node_get()
1139 EBUG_ON(b->c.btree_id != path->btree_id); in __bch2_btree_node_get()
1140 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in __bch2_btree_node_get()
1141 btree_check_header(c, b); in __bch2_btree_node_get()
1143 return b; in __bch2_btree_node_get()
1147 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
1167 struct bch_fs *c = trans->c; in bch2_btree_node_get()
1168 struct btree *b; in bch2_btree_node_get() local
1173 b = btree_node_mem_ptr(k); in bch2_btree_node_get()
1176 * Check b->hash_val _before_ calling btree_node_lock() - this might not in bch2_btree_node_get()
1180 if (unlikely(!c->opts.btree_node_mem_ptr_optimization || in bch2_btree_node_get()
1181 !b || in bch2_btree_node_get()
1182 b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_get()
1188 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in bch2_btree_node_get()
1194 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get()
1195 b->c.level != level || in bch2_btree_node_get()
1197 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1205 if (unlikely(btree_node_read_in_flight(b))) { in bch2_btree_node_get()
1206 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1210 prefetch(b->aux_data); in bch2_btree_node_get()
1212 for_each_bset(b, t) { in bch2_btree_node_get()
1213 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get()
1221 if (!btree_node_accessed(b)) in bch2_btree_node_get()
1222 set_btree_node_accessed(b); in bch2_btree_node_get()
1224 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get()
1225 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1226 return ERR_PTR(-BCH_ERR_btree_node_read_error); in bch2_btree_node_get()
1229 EBUG_ON(b->c.btree_id != path->btree_id); in bch2_btree_node_get()
1230 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get()
1231 btree_check_header(c, b); in bch2_btree_node_get()
1233 return b; in bch2_btree_node_get()
1242 struct bch_fs *c = trans->c; in bch2_btree_node_get_noiter()
1243 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_get_noiter()
1244 struct btree *b; in bch2_btree_node_get_noiter() local
1249 if (c->opts.btree_node_mem_ptr_optimization) { in bch2_btree_node_get_noiter()
1250 b = btree_node_mem_ptr(k); in bch2_btree_node_get_noiter()
1251 if (b) in bch2_btree_node_get_noiter()
1255 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1256 if (unlikely(!b)) { in bch2_btree_node_get_noiter()
1260 b = bch2_btree_node_fill(trans, NULL, k, btree_id, in bch2_btree_node_get_noiter()
1264 if (!b) in bch2_btree_node_get_noiter()
1267 if (IS_ERR(b) && in bch2_btree_node_get_noiter()
1271 if (IS_ERR(b)) in bch2_btree_node_get_noiter()
1275 ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_); in bch2_btree_node_get_noiter()
1281 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get_noiter()
1282 b->c.btree_id != btree_id || in bch2_btree_node_get_noiter()
1283 b->c.level != level)) { in bch2_btree_node_get_noiter()
1284 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1290 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_get_noiter()
1292 prefetch(b->aux_data); in bch2_btree_node_get_noiter()
1294 for_each_bset(b, t) { in bch2_btree_node_get_noiter()
1295 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get_noiter()
1303 if (!btree_node_accessed(b)) in bch2_btree_node_get_noiter()
1304 set_btree_node_accessed(b); in bch2_btree_node_get_noiter()
1306 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get_noiter()
1307 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1308 b = ERR_PTR(-BCH_ERR_btree_node_read_error); in bch2_btree_node_get_noiter()
1312 EBUG_ON(b->c.btree_id != btree_id); in bch2_btree_node_get_noiter()
1313 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get_noiter()
1314 btree_check_header(c, b); in bch2_btree_node_get_noiter()
1317 return b; in bch2_btree_node_get_noiter()
1325 struct bch_fs *c = trans->c; in bch2_btree_node_prefetch()
1326 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_prefetch()
1331 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch() local
1332 if (b) in bch2_btree_node_prefetch()
1335 b = bch2_btree_node_fill(trans, path, k, btree_id, in bch2_btree_node_prefetch()
1337 int ret = PTR_ERR_OR_ZERO(b); in bch2_btree_node_prefetch()
1340 if (b) in bch2_btree_node_prefetch()
1341 six_unlock_read(&b->c.lock); in bch2_btree_node_prefetch()
1347 struct bch_fs *c = trans->c; in bch2_btree_node_evict()
1348 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_evict()
1349 struct btree *b; in bch2_btree_node_evict() local
1351 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1352 if (!b) in bch2_btree_node_evict()
1355 BUG_ON(b == btree_node_root(trans->c, b)); in bch2_btree_node_evict()
1362 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_evict()
1363 __bch2_btree_node_wait_on_write(b); in bch2_btree_node_evict()
1365 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_node_evict()
1366 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_node_evict()
1367 if (unlikely(b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_evict()
1370 if (btree_node_dirty(b)) { in bch2_btree_node_evict()
1371 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_node_evict()
1372 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1373 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1377 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_evict()
1379 mutex_lock(&bc->lock); in bch2_btree_node_evict()
1380 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1381 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1382 mutex_unlock(&bc->lock); in bch2_btree_node_evict()
1384 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1385 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1401 void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_pos_to_text() argument
1404 bch2_btree_id_str(b->c.btree_id), in bch2_btree_pos_to_text()
1405 b->c.level, in bch2_btree_pos_to_text()
1406 bch2_btree_id_root(c, b->c.btree_id)->level); in bch2_btree_pos_to_text()
1407 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_pos_to_text()
1410 void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_node_to_text() argument
1416 bch2_btree_keys_stats(b, &stats); in bch2_btree_node_to_text()
1418 prt_printf(out, "l %u ", b->c.level); in bch2_btree_node_to_text()
1419 bch2_bpos_to_text(out, b->data->min_key); in bch2_btree_node_to_text()
1420 prt_printf(out, " - "); in bch2_btree_node_to_text()
1421 bch2_bpos_to_text(out, b->data->max_key); in bch2_btree_node_to_text()
1424 bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_node_to_text()
1429 bch2_bkey_format_to_text(out, &b->format); in bch2_btree_node_to_text()
1439 b->unpack_fn_len, in bch2_btree_node_to_text()
1440 b->nr.live_u64s * sizeof(u64), in bch2_btree_node_to_text()
1441 btree_buf_bytes(b) - sizeof(struct btree_node), in bch2_btree_node_to_text()
1442 b->nr.live_u64s * 100 / btree_max_u64s(c), in bch2_btree_node_to_text()
1443 b->sib_u64s[0], in bch2_btree_node_to_text()
1444 b->sib_u64s[1], in bch2_btree_node_to_text()
1445 c->btree_foreground_merge_threshold, in bch2_btree_node_to_text()
1446 b->nr.packed_keys, in bch2_btree_node_to_text()
1447 b->nr.unpacked_keys, in bch2_btree_node_to_text()
1456 prt_human_readable_u64(out, nr * c->opts.btree_node_size); in prt_btree_cache_line()
1471 if (!out->nr_tabstops) in bch2_btree_cache_to_text()
1474 prt_btree_cache_line(out, c, "live:", bc->live[0].nr); in bch2_btree_cache_to_text()
1475 prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); in bch2_btree_cache_to_text()
1476 prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable); in bch2_btree_cache_to_text()
1477 prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); in bch2_btree_cache_to_text()
1478 prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); in bch2_btree_cache_to_text()
1481 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_btree_cache_to_text()
1482 prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]); in bch2_btree_cache_to_text()
1485 prt_printf(out, "freed:\t%zu\n", bc->nr_freed); in bch2_btree_cache_to_text()
1488 for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++) in bch2_btree_cache_to_text()
1490 bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]); in bch2_btree_cache_to_text()