Lines matching full:bc (identifier search for bc in fs/bcachefs/btree_cache.c)

23 		bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_##counter]++;	 \
52 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in btree_cache_can_free() local
56 can_free = max_t(ssize_t, 0, can_free - bc->nr_reserve); in btree_cache_can_free()
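
The container_of(list, struct btree_cache, live[list->idx]) expression above recovers the enclosing btree_cache from a pointer to one of its live[] elements; it works because each element records its own array index (set in bch2_fs_btree_cache_init_early(), further down). A minimal userspace sketch of that trick; cache, cache_list, and list_to_cache() are hypothetical stand-ins, not the bcachefs types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_list {
	unsigned idx;	/* this element's index within live[] */
	size_t nr;
};

struct cache {
	const char *name;
	struct cache_list live[2];
};

static struct cache *list_to_cache(struct cache_list *l)
{
	/* Relies on the compiler accepting a runtime array index inside
	 * offsetof(), as the kernel's container_of() usage does. */
	return container_of(l, struct cache, live[l->idx]);
}

int main(void)
{
	struct cache c = { .name = "btree_cache" };

	c.live[0].idx = 0;
	c.live[1].idx = 1;

	printf("%s\n", list_to_cache(&c.live[1])->name);
	return 0;
}
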
60 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
65 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
67 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
70 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
75 bc->nr_freeable++; in __bch2_btree_node_to_freelist()
76 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
81 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_to_freelist() local
83 mutex_lock(&bc->lock); in bch2_btree_node_to_freelist()
84 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
85 mutex_unlock(&bc->lock); in bch2_btree_node_to_freelist()
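
Several entry points in this file come in locked/__unlocked pairs: __bch2_btree_node_to_freelist() assumes bc->lock is held and pairs the list insertion with the nr_freeable count, while bch2_btree_node_to_freelist() is the locking wrapper. A minimal userspace sketch of that pattern, assuming pthread mutexes and hand-rolled list helpers as stand-ins for the kernel primitives:

#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

struct node { struct list_head list; };

struct node_cache {
	pthread_mutex_t lock;
	struct list_head freeable;
	size_t nr_freeable;
};

/* Caller must hold cache->lock, as in __bch2_btree_node_to_freelist(). */
static void __node_to_freelist(struct node_cache *c, struct node *n)
{
	c->nr_freeable++;
	list_add(&n->list, &c->freeable);
}

/* Locking wrapper, as in bch2_btree_node_to_freelist(). */
static void node_to_freelist(struct node_cache *c, struct node *n)
{
	pthread_mutex_lock(&c->lock);
	__node_to_freelist(c, n);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct node_cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct node n;

	list_init(&c.freeable);
	node_to_freelist(&c, &n);
	printf("nr_freeable = %zu\n", c.nr_freeable);
	return 0;
}
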
91 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free() argument
119 btree_node_to_freedlist(bc, b); in __btree_node_data_free()
122 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
126 --bc->nr_freeable; in btree_node_data_free()
127 __btree_node_data_free(bc, b); in btree_node_data_free()
191 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_mem_alloc() local
205 __bch2_btree_node_to_freelist(bc, b); in __bch2_btree_node_mem_alloc()
209 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
213 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
216 bbpos_cmp(bc->pinned_nodes_start, pos) < 0 && in __btree_node_pinned()
217 bbpos_cmp(bc->pinned_nodes_end, pos) >= 0); in __btree_node_pinned()
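
__btree_node_pinned() combines a per-level bitmask of btree IDs (index 0 for leaf nodes, 1 for interior nodes, via !!b->c.level) with a position range whose start is exclusive and whose end is inclusive. A sketch of the same predicate, assuming a simplified two-field bbpos (the real struct bbpos carries a full struct bpos):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bbpos { uint32_t btree; uint64_t offset; };

static int bbpos_cmp(struct bbpos a, struct bbpos b)
{
	if (a.btree != b.btree)
		return a.btree < b.btree ? -1 : 1;
	if (a.offset != b.offset)
		return a.offset < b.offset ? -1 : 1;
	return 0;
}

struct pin_state {
	uint64_t mask[2];	/* [leaf, interior] bitmasks of btree IDs */
	struct bbpos start, end;
};

static bool node_pinned(const struct pin_state *p,
			uint32_t btree_id, unsigned level, struct bbpos pos)
{
	uint64_t mask = p->mask[!!level];

	/* exclusive start, inclusive end, as in __btree_node_pinned() */
	return (mask & (1ULL << btree_id)) &&
	       bbpos_cmp(p->start, pos) < 0 &&
	       bbpos_cmp(p->end, pos) >= 0;
}

int main(void)
{
	struct pin_state p = {
		.mask  = { 1ULL << 3, 0 },
		.start = { 3, 0 },
		.end   = { 3, 100 },
	};
	struct bbpos pos = { 3, 42 };

	printf("pinned: %d\n", node_pinned(&p, 3, 0, pos));
	return 0;
}
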
222 struct btree_cache *bc = &c->btree_cache; in bch2_node_pin() local
224 mutex_lock(&bc->lock); in bch2_node_pin()
225 BUG_ON(!__btree_node_pinned(bc, b)); in bch2_node_pin()
228 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
229 bc->live[0].nr--; in bch2_node_pin()
230 bc->live[1].nr++; in bch2_node_pin()
232 mutex_unlock(&bc->lock); in bch2_node_pin()
237 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_unpin() local
240 mutex_lock(&bc->lock); in bch2_btree_cache_unpin()
244 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
246 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
247 bc->live[0].nr++; in bch2_btree_cache_unpin()
248 bc->live[1].nr--; in bch2_btree_cache_unpin()
251 mutex_unlock(&bc->lock); in bch2_btree_cache_unpin()
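
Pinning and unpinning are just list_move() between the two counted live lists, with the nr counts transferred by hand under bc->lock. A compact sketch of that bookkeeping, again with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_move(struct list_head *n, struct list_head *h)
{
	/* unlink from the current list, then add to the new one */
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_add(n, h);
}

struct live_list { struct list_head list; size_t nr; };

/* Mirrors bch2_node_pin(): move from live[0] (unpinned) to live[1]. */
static void node_pin(struct live_list live[2], struct list_head *node)
{
	list_move(node, &live[1].list);
	live[0].nr--;
	live[1].nr++;
}

int main(void)
{
	struct live_list live[2] = {};
	struct list_head node;

	list_init(&live[0].list);
	list_init(&live[1].list);
	list_add(&node, &live[0].list);
	live[0].nr = 1;

	node_pin(live, &node);
	printf("unpinned %zu, pinned %zu\n", live[0].nr, live[1].nr);
	return 0;
}
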
256 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
258 lockdep_assert_held(&bc->lock); in __bch2_btree_node_hash_remove()
260 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
267 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
268 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
272 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
274 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
275 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
278 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
284 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
290 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
292 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
295 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
296 bc->live[p].nr++; in __bch2_btree_node_hash_insert()
300 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
306 mutex_lock(&bc->lock); in bch2_btree_node_hash_insert()
307 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
308 mutex_unlock(&bc->lock); in bch2_btree_node_hash_insert()
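
__bch2_btree_node_hash_insert() relies on rhashtable_lookup_insert_fast() failing with -EEXIST on a duplicate key, and only on success bumps nr_by_btree[] and the appropriate live list count. A sketch of that flow, assuming a fixed-size chained table as a stand-in for the kernel rhashtable:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 64

struct node {
	uint64_t key;
	struct node *hash_next;
};

struct cache {
	struct node *table[NR_BUCKETS];
	size_t nr_by_btree[16];
	size_t live_nr[2];	/* [unpinned, pinned] */
};

static int hash_insert(struct cache *c, struct node *n)
{
	struct node **b = &c->table[n->key % NR_BUCKETS];

	for (struct node *i = *b; i; i = i->hash_next)
		if (i->key == n->key)
			return -EEXIST;	/* like rhashtable_lookup_insert_fast() */

	n->hash_next = *b;
	*b = n;
	return 0;
}

static int node_hash_insert(struct cache *c, struct node *n,
			    unsigned btree_id, int pinned)
{
	int ret = hash_insert(c, n);
	if (ret)
		return ret;

	/* counters only move once the insert has succeeded */
	c->nr_by_btree[btree_id]++;
	c->live_nr[!!pinned]++;
	return 0;
}

int main(void)
{
	struct cache c = {0};
	struct node a = { .key = 1 }, b = { .key = 1 };

	printf("first:  %d\n", node_hash_insert(&c, &a, 0, 0));
	printf("second: %d\n", node_hash_insert(&c, &b, 0, 0)); /* -EEXIST */
	return 0;
}
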
343 static inline struct btree *btree_cache_find(struct btree_cache *bc, in btree_cache_find() argument
348 return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params); in btree_cache_find()
357 struct btree_cache *bc = &c->btree_cache; in __btree_node_reclaim() local
360 lockdep_assert_held(&bc->lock); in __btree_node_reclaim()
466 struct btree_cache *bc = container_of(list, struct btree_cache, live[list->idx]); in bch2_btree_cache_scan() local
467 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_scan()
475 bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4; in bch2_btree_cache_scan()
480 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
494 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
508 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
512 bc->nr_freed++; in bch2_btree_cache_scan()
521 bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; in bch2_btree_cache_scan()
524 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
525 __btree_node_data_free(bc, b); in bch2_btree_cache_scan()
528 bc->nr_freed++; in bch2_btree_cache_scan()
541 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
546 mutex_lock(&bc->lock); in bch2_btree_cache_scan()
557 mutex_unlock(&bc->lock); in bch2_btree_cache_scan()
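
The scan loop walks its lists with list_for_each_entry_safe(), which reads the next entry before the current one can be unlinked and freed, and keeps per-outcome counters (nr_freed for what it reclaims, not_freed[] for what it skips). A userspace sketch of that safe-iteration shape, with simplified types:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;	/* intrusive doubly-linked list */
	int can_free;
};

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_add(struct node *n, struct node *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static size_t scan(struct node *head)
{
	size_t nr_freed = 0;

	/* "safe" iteration: t is captured before b may be freed */
	for (struct node *b = head->next, *t = b->next;
	     b != head;
	     b = t, t = t->next) {
		if (!b->can_free)
			continue;	/* bc->not_freed[...]++ in the original */
		list_del(b);
		free(b);
		nr_freed++;		/* bc->nr_freed++ in the original */
	}
	return nr_freed;
}

int main(void)
{
	struct node head;

	list_init(&head);
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->can_free = i & 1;
		list_add(n, &head);
	}
	printf("freed %zu\n", scan(&head));
	return 0;
}
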
578 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_exit() local
582 shrinker_free(bc->live[1].shrink); in bch2_fs_btree_cache_exit()
583 shrinker_free(bc->live[0].shrink); in bch2_fs_btree_cache_exit()
587 mutex_lock(&bc->lock); in bch2_fs_btree_cache_exit()
590 list_move(&c->verify_data->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
598 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
601 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
602 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
604 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
606 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
610 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
616 list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); in bch2_fs_btree_cache_exit()
618 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
624 mutex_unlock(&bc->lock); in bch2_fs_btree_cache_exit()
627 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_fs_btree_cache_exit()
628 BUG_ON(bc->nr_by_btree[i]); in bch2_fs_btree_cache_exit()
629 BUG_ON(bc->live[0].nr); in bch2_fs_btree_cache_exit()
630 BUG_ON(bc->live[1].nr); in bch2_fs_btree_cache_exit()
631 BUG_ON(bc->nr_freeable); in bch2_fs_btree_cache_exit()
633 if (bc->table_init_done) in bch2_fs_btree_cache_exit()
634 rhashtable_destroy(&bc->table); in bch2_fs_btree_cache_exit()
639 struct btree_cache *bc = &c->btree_cache; in bch2_fs_btree_cache_init() local
644 ret = rhashtable_init(&bc->table, &bch_btree_cache_params); in bch2_fs_btree_cache_init()
648 bc->table_init_done = true; in bch2_fs_btree_cache_init()
652 for (i = 0; i < bc->nr_reserve; i++) in bch2_fs_btree_cache_init()
656 list_splice_init(&bc->live[0].list, &bc->freeable); in bch2_fs_btree_cache_init()
663 bc->live[0].shrink = shrink; in bch2_fs_btree_cache_init()
667 shrink->private_data = &bc->live[0]; in bch2_fs_btree_cache_init()
673 bc->live[1].shrink = shrink; in bch2_fs_btree_cache_init()
677 shrink->private_data = &bc->live[1]; in bch2_fs_btree_cache_init()
685 void bch2_fs_btree_cache_init_early(struct btree_cache *bc) in bch2_fs_btree_cache_init_early() argument
687 mutex_init(&bc->lock); in bch2_fs_btree_cache_init_early()
688 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) { in bch2_fs_btree_cache_init_early()
689 bc->live[i].idx = i; in bch2_fs_btree_cache_init_early()
690 INIT_LIST_HEAD(&bc->live[i].list); in bch2_fs_btree_cache_init_early()
692 INIT_LIST_HEAD(&bc->freeable); in bch2_fs_btree_cache_init_early()
693 INIT_LIST_HEAD(&bc->freed_pcpu); in bch2_fs_btree_cache_init_early()
694 INIT_LIST_HEAD(&bc->freed_nonpcpu); in bch2_fs_btree_cache_init_early()
706 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_unlock() local
708 if (bc->alloc_lock == current) { in bch2_btree_cache_cannibalize_unlock()
710 bc->alloc_lock = NULL; in bch2_btree_cache_cannibalize_unlock()
711 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_unlock()
718 struct btree_cache *bc = &c->btree_cache; in bch2_btree_cache_cannibalize_lock() local
722 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) in bch2_btree_cache_cannibalize_lock()
730 closure_wait(&bc->alloc_wait, cl); in bch2_btree_cache_cannibalize_lock()
734 if (try_cmpxchg(&bc->alloc_lock, &old, current) || old == current) { in bch2_btree_cache_cannibalize_lock()
736 closure_wake_up(&bc->alloc_wait); in bch2_btree_cache_cannibalize_lock()
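
bch2_btree_cache_cannibalize_lock() takes a single-owner lock with try_cmpxchg() on bc->alloc_lock, treating old == current as a successful recursive take; the unlock path clears the owner and wakes waiters on bc->alloc_wait. A sketch of that pattern with C11 atomics; struct task stands in for the kernel's current, and the closure waitlist is omitted:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { const char *name; };

static _Atomic(struct task *) alloc_lock;

static bool cannibalize_trylock(struct task *t)
{
	struct task *old = NULL;

	/* Succeeds if unowned; old == t handles a recursive take. */
	return atomic_compare_exchange_strong(&alloc_lock, &old, t) ||
	       old == t;
}

static void cannibalize_unlock(struct task *t)
{
	/* Only the owner releases (and would wake waiters here). */
	if (atomic_load(&alloc_lock) == t)
		atomic_store(&alloc_lock, NULL);
}

int main(void)
{
	struct task a = { "a" }, b = { "b" };

	printf("a takes:    %d\n", cannibalize_trylock(&a));	/* 1 */
	printf("a re-takes: %d\n", cannibalize_trylock(&a));	/* 1 */
	printf("b takes:    %d\n", cannibalize_trylock(&b));	/* 0 */
	cannibalize_unlock(&a);
	printf("b takes:    %d\n", cannibalize_trylock(&b));	/* 1 */
	return 0;
}
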
750 struct btree_cache *bc = &c->btree_cache; in btree_node_cannibalize() local
753 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
754 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
759 for (unsigned i = 0; i < ARRAY_SIZE(bc->live); i++) in btree_node_cannibalize()
760 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
776 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_mem_alloc() local
778 ? &bc->freed_pcpu in bch2_btree_node_mem_alloc()
779 : &bc->freed_nonpcpu; in bch2_btree_node_mem_alloc()
783 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
797 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
802 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
815 list_for_each_entry(b2, &bc->freeable, list) in bch2_btree_node_mem_alloc()
821 --bc->nr_freeable; in bch2_btree_node_mem_alloc()
822 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
823 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
830 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
864 mutex_lock(&bc->lock); in bch2_btree_node_mem_alloc()
867 if (bc->alloc_lock == current) { in bch2_btree_node_mem_alloc()
870 __bch2_btree_node_hash_remove(bc, b2); in bch2_btree_node_mem_alloc()
875 btree_node_to_freedlist(bc, b2); in bch2_btree_node_mem_alloc()
883 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
889 mutex_unlock(&bc->lock); in bch2_btree_node_mem_alloc()
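
bch2_btree_node_mem_alloc() first tries to take a node that already has data off bc->freeable (decrementing nr_freeable and moving it to a freed list) before falling back to a fresh allocation, and only cannibalizes a live node as a last resort. A simplified sketch of the fast path alone, with locking and the pcpu/non-pcpu freed lists omitted:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };	/* singly linked is enough here */

struct cache {
	struct node *freeable;
	size_t nr_freeable;
};

static struct node *node_alloc(struct cache *c)
{
	struct node *n = c->freeable;

	if (n) {
		/* like the list_for_each_entry() scan of bc->freeable */
		c->freeable = n->next;
		--c->nr_freeable;
		return n;
	}
	return calloc(1, sizeof(*n));	/* slow path: fresh allocation */
}

int main(void)
{
	struct node spare = { NULL };
	struct cache c = { .freeable = &spare, .nr_freeable = 1 };
	struct node *fresh;

	printf("reused: %d\n", node_alloc(&c) == &spare);	/* 1 */
	fresh = node_alloc(&c);
	printf("fresh:  %d\n", fresh != NULL);
	free(fresh);
	return 0;
}
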
903 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_fill() local
954 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
960 mutex_lock(&bc->lock); in bch2_btree_node_fill()
961 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
962 mutex_unlock(&bc->lock); in bch2_btree_node_fill()
1044 struct btree_cache *bc = &c->btree_cache; in __bch2_btree_node_get() local
1051 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1243 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_get_noiter() local
1255 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1326 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_prefetch() local
1331 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch()
1348 struct btree_cache *bc = &c->btree_cache; in bch2_btree_node_evict() local
1351 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1379 mutex_lock(&bc->lock); in bch2_btree_node_evict()
1380 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1381 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1382 mutex_unlock(&bc->lock); in bch2_btree_node_evict()
1467 void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc) in bch2_btree_cache_to_text() argument
1469 struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache); in bch2_btree_cache_to_text()
1474 prt_btree_cache_line(out, c, "live:", bc->live[0].nr); in bch2_btree_cache_to_text()
1475 prt_btree_cache_line(out, c, "pinned:", bc->live[1].nr); in bch2_btree_cache_to_text()
1476 prt_btree_cache_line(out, c, "freeable:", bc->nr_freeable); in bch2_btree_cache_to_text()
1477 prt_btree_cache_line(out, c, "dirty:", atomic_long_read(&bc->nr_dirty)); in bch2_btree_cache_to_text()
1478 prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); in bch2_btree_cache_to_text()
1481 for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) in bch2_btree_cache_to_text()
1482 prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]); in bch2_btree_cache_to_text()
1485 prt_printf(out, "freed:\t%zu\n", bc->nr_freed); in bch2_btree_cache_to_text()
1488 for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++) in bch2_btree_cache_to_text()
1490 bch2_btree_cache_not_freed_reasons_strs[i], bc->not_freed[i]); in bch2_btree_cache_to_text()