Lines matching refs: b (fs/bcachefs/btree_cache.c)

37 if (!c->btree_roots_known[0].b) in bch2_recalc_btree_reserve()
43 if (r->b) in bch2_recalc_btree_reserve()
44 reserve += min_t(unsigned, 1, r->b->c.level) * 8; in bch2_recalc_btree_reserve()
60 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) in btree_node_to_freedlist() argument
62 BUG_ON(!list_empty(&b->list)); in btree_node_to_freedlist()
64 if (b->c.lock.readers) in btree_node_to_freedlist()
65 list_add(&b->list, &bc->freed_pcpu); in btree_node_to_freedlist()
67 list_add(&b->list, &bc->freed_nonpcpu); in btree_node_to_freedlist()
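
The two list_add() calls above route a freed node onto one of two freed lists, keyed on whether its six lock was set up with per-cpu reader counts; a node is then recycled only through a lock of the same flavor. A minimal userspace sketch of that routing, with simplified stand-in types (struct node and has_pcpu_readers are hypothetical; the real code uses struct btree and b->c.lock.readers):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal intrusive doubly-linked list, same shape as <linux/list.h>. */
    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *h) { h->prev = h->next = h; }
    static bool list_empty(const struct list_head *h) { return h->next == h; }
    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    /* Hypothetical stand-in for struct btree: only what the routing needs. */
    struct node {
        struct list_head list;
        bool has_pcpu_readers;   /* models b->c.lock.readers != NULL */
    };

    struct cache {
        struct list_head freed_pcpu;     /* nodes whose lock uses per-cpu readers */
        struct list_head freed_nonpcpu;  /* everyone else */
    };

    static void node_to_freedlist(struct cache *bc, struct node *n)
    {
        assert(list_empty(&n->list));    /* mirrors BUG_ON(!list_empty(&b->list)) */
        list_add(&n->list, n->has_pcpu_readers ? &bc->freed_pcpu
                                               : &bc->freed_nonpcpu);
    }

    int main(void)
    {
        struct cache bc;
        list_init(&bc.freed_pcpu);
        list_init(&bc.freed_nonpcpu);

        struct node a = { .has_pcpu_readers = true };
        list_init(&a.list);              /* a node on no list points at itself */
        node_to_freedlist(&bc, &a);
        printf("freed_pcpu empty: %d\n", list_empty(&bc.freed_pcpu));  /* 0 */
        return 0;
    }

Keeping the two flavors apart presumably lets a recycled node keep its lock state rather than reinitializing it on every reuse.
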
70 static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_to_freelist() argument
72 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_to_freelist()
73 BUG_ON(!b->data); in __bch2_btree_node_to_freelist()
76 list_add(&b->list, &bc->freeable); in __bch2_btree_node_to_freelist()
79 void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) in bch2_btree_node_to_freelist() argument
84 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_to_freelist()
87 six_unlock_write(&b->c.lock); in bch2_btree_node_to_freelist()
88 six_unlock_intent(&b->c.lock); in bch2_btree_node_to_freelist()
91 static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) in __btree_node_data_free() argument
93 BUG_ON(!list_empty(&b->list)); in __btree_node_data_free()
94 BUG_ON(btree_node_hashed(b)); in __btree_node_data_free()
101 if (b->data) in __btree_node_data_free()
102 mm_account_reclaimed_pages(btree_buf_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
103 if (b->aux_data) in __btree_node_data_free()
104 mm_account_reclaimed_pages(btree_aux_data_bytes(b) / PAGE_SIZE); in __btree_node_data_free()
106 EBUG_ON(btree_node_write_in_flight(b)); in __btree_node_data_free()
108 clear_btree_node_just_written(b); in __btree_node_data_free()
110 kvfree(b->data); in __btree_node_data_free()
111 b->data = NULL; in __btree_node_data_free()
113 kvfree(b->aux_data); in __btree_node_data_free()
115 munmap(b->aux_data, btree_aux_data_bytes(b)); in __btree_node_data_free()
117 b->aux_data = NULL; in __btree_node_data_free()
119 btree_node_to_freedlist(bc, b); in __btree_node_data_free()
122 static void btree_node_data_free(struct btree_cache *bc, struct btree *b) in btree_node_data_free() argument
124 BUG_ON(list_empty(&b->list)); in btree_node_data_free()
125 list_del_init(&b->list); in btree_node_data_free()
127 __btree_node_data_free(bc, b); in btree_node_data_free()
133 const struct btree *b = obj; in bch2_btree_cache_cmp_fn() local
136 return b->hash_val == *v ? 0 : 1; in bch2_btree_cache_cmp_fn()
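
bch2_btree_cache_cmp_fn() follows the rhashtable obj_cmpfn contract: memcmp-style, 0 means match, nonzero means mismatch. A tiny self-contained sketch of that contract (struct obj is a stand-in for struct btree; only the hash_val comparison is modeled):

    #include <stdint.h>
    #include <stdio.h>

    struct obj { uint64_t hash_val; };

    /* Same convention as memcmp: 0 on match, nonzero otherwise. */
    static int cmp_fn(const void *key, const void *object)
    {
        const uint64_t *v = key;
        const struct obj *b = object;
        return b->hash_val == *v ? 0 : 1;
    }

    int main(void)
    {
        struct obj o = { .hash_val = 42 };
        uint64_t k_hit = 42, k_miss = 7;
        printf("%d %d\n", cmp_fn(&k_hit, &o), cmp_fn(&k_miss, &o));  /* 0 1 */
        return 0;
    }
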
147 static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) in btree_node_data_alloc() argument
149 BUG_ON(b->data || b->aux_data); in btree_node_data_alloc()
153 b->data = kvmalloc(btree_buf_bytes(b), gfp); in btree_node_data_alloc()
154 if (!b->data) in btree_node_data_alloc()
157 b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp); in btree_node_data_alloc()
159 b->aux_data = mmap(NULL, btree_aux_data_bytes(b), in btree_node_data_alloc()
162 if (b->aux_data == MAP_FAILED) in btree_node_data_alloc()
163 b->aux_data = NULL; in btree_node_data_alloc()
165 if (!b->aux_data) { in btree_node_data_alloc()
166 kvfree(b->data); in btree_node_data_alloc()
167 b->data = NULL; in btree_node_data_alloc()
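
btree_node_data_alloc() allocates two buffers and must unwind the first when the second fails, leaving both pointers NULL so the caller sees a clean failure. A hedged userspace sketch of that pattern (malloc/free stand in for kvmalloc/kvfree and the mmap fallback; sizes are arbitrary):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { void *data, *aux_data; };

    static int node_data_alloc(struct node *b, size_t buf_bytes, size_t aux_bytes)
    {
        b->data = malloc(buf_bytes);       /* kvmalloc() in the original */
        if (!b->data)
            return -1;

        b->aux_data = malloc(aux_bytes);   /* kvmalloc() or mmap() upstream */
        if (!b->aux_data) {
            free(b->data);                 /* undo the first allocation */
            b->data = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct node b = { 0 };
        if (!node_data_alloc(&b, 4096, 4096))
            printf("allocated\n");
        free(b.aux_data);
        free(b.data);
        return 0;
    }
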
176 struct btree *b; in __btree_node_mem_alloc() local
178 b = kzalloc(sizeof(struct btree), gfp); in __btree_node_mem_alloc()
179 if (!b) in __btree_node_mem_alloc()
182 bkey_btree_ptr_init(&b->key); in __btree_node_mem_alloc()
183 INIT_LIST_HEAD(&b->list); in __btree_node_mem_alloc()
184 INIT_LIST_HEAD(&b->write_blocked); in __btree_node_mem_alloc()
185 b->byte_order = ilog2(c->opts.btree_node_size); in __btree_node_mem_alloc()
186 return b; in __btree_node_mem_alloc()
192 struct btree *b; in __bch2_btree_node_mem_alloc() local
194 b = __btree_node_mem_alloc(c, GFP_KERNEL); in __bch2_btree_node_mem_alloc()
195 if (!b) in __bch2_btree_node_mem_alloc()
198 if (btree_node_data_alloc(c, b, GFP_KERNEL)) { in __bch2_btree_node_mem_alloc()
199 kfree(b); in __bch2_btree_node_mem_alloc()
203 bch2_btree_lock_init(&b->c, 0); in __bch2_btree_node_mem_alloc()
205 __bch2_btree_node_to_freelist(bc, b); in __bch2_btree_node_mem_alloc()
206 return b; in __bch2_btree_node_mem_alloc()
209 static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) in __btree_node_pinned() argument
211 struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); in __btree_node_pinned()
213 u64 mask = bc->pinned_nodes_mask[!!b->c.level]; in __btree_node_pinned()
215 return ((mask & BIT_ULL(b->c.btree_id)) && in __btree_node_pinned()
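
__btree_node_pinned() keeps two 64-bit masks, one for leaves (level 0) and one for interior nodes, with one bit per btree ID; !!b->c.level folds any depth into index 0 or 1. A sketch of the mask half of the test (the bbpos range check against bc->pinned_nodes_start/end is omitted; struct cache here is a stand-in):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    struct cache { uint64_t pinned_nodes_mask[2]; };

    static bool node_pinned(const struct cache *bc, unsigned btree_id, unsigned level)
    {
        uint64_t mask = bc->pinned_nodes_mask[!!level];  /* !! folds level to 0/1 */
        return mask & BIT_ULL(btree_id);
    }

    int main(void)
    {
        struct cache bc = { .pinned_nodes_mask = { BIT_ULL(3), 0 } };
        printf("%d %d\n", node_pinned(&bc, 3, 0), node_pinned(&bc, 3, 1));  /* 1 0 */
        return 0;
    }
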
220 void bch2_node_pin(struct bch_fs *c, struct btree *b) in bch2_node_pin() argument
225 BUG_ON(!__btree_node_pinned(bc, b)); in bch2_node_pin()
226 if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { in bch2_node_pin()
227 set_btree_node_pinned(b); in bch2_node_pin()
228 list_move(&b->list, &bc->live[1].list); in bch2_node_pin()
238 struct btree *b, *n; in bch2_btree_cache_unpin() local
244 list_for_each_entry_safe(b, n, &bc->live[1].list, list) { in bch2_btree_cache_unpin()
245 clear_btree_node_pinned(b); in bch2_btree_cache_unpin()
246 list_move(&b->list, &bc->live[0].list); in bch2_btree_cache_unpin()
256 void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_remove() argument
260 int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); in __bch2_btree_node_hash_remove()
264 b->hash_val = 0; in __bch2_btree_node_hash_remove()
266 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_remove()
267 --bc->nr_by_btree[b->c.btree_id]; in __bch2_btree_node_hash_remove()
268 --bc->live[btree_node_pinned(b)].nr; in __bch2_btree_node_hash_remove()
269 list_del_init(&b->list); in __bch2_btree_node_hash_remove()
272 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) in bch2_btree_node_hash_remove() argument
274 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_hash_remove()
275 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_hash_remove()
278 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) in __bch2_btree_node_hash_insert() argument
280 BUG_ON(!list_empty(&b->list)); in __bch2_btree_node_hash_insert()
281 BUG_ON(b->hash_val); in __bch2_btree_node_hash_insert()
283 b->hash_val = btree_ptr_hash_val(&b->key); in __bch2_btree_node_hash_insert()
284 int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, in __bch2_btree_node_hash_insert()
289 if (b->c.btree_id < BTREE_ID_NR) in __bch2_btree_node_hash_insert()
290 bc->nr_by_btree[b->c.btree_id]++; in __bch2_btree_node_hash_insert()
292 bool p = __btree_node_pinned(bc, b); in __bch2_btree_node_hash_insert()
293 mod_bit(BTREE_NODE_pinned, &b->flags, p); in __bch2_btree_node_hash_insert()
295 list_add_tail(&b->list, &bc->live[p].list); in __bch2_btree_node_hash_insert()
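
__bch2_btree_node_hash_insert() computes the node's hash key from its bkey, inserts it with rhashtable_lookup_insert_fast() (which fails on a duplicate), then bumps the per-btree counter and places the node on the pinned or unpinned live LRU. A toy sketch of that bookkeeping (the 16-slot table is a stand-in for the real rhashtable, and 0 doubles as the empty-slot sentinel):

    #include <stdint.h>
    #include <stdio.h>

    enum { SLOTS = 16, BTREE_ID_NR = 4 };

    struct cache {
        uint64_t table[SLOTS];             /* 0 = empty slot */
        unsigned nr_by_btree[BTREE_ID_NR];
        unsigned live_nr[2];               /* indexed by pinned ? 1 : 0 */
    };

    static int hash_insert(struct cache *bc, uint64_t hash_val,
                           unsigned btree_id, int pinned)
    {
        unsigned slot = hash_val % SLOTS;
        if (bc->table[slot] == hash_val)
            return -1;                     /* duplicate: insert fails upstream too */
        bc->table[slot] = hash_val;

        if (btree_id < BTREE_ID_NR)
            bc->nr_by_btree[btree_id]++;
        bc->live_nr[!!pinned]++;           /* list_add_tail() to live[p] upstream */
        return 0;
    }

    int main(void)
    {
        struct cache bc = { 0 };
        printf("%d %d\n", hash_insert(&bc, 99, 1, 0),
                          hash_insert(&bc, 99, 1, 0));   /* 0 -1 */
        return 0;
    }
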
300 int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, in bch2_btree_node_hash_insert() argument
303 b->c.level = level; in bch2_btree_node_hash_insert()
304 b->c.btree_id = id; in bch2_btree_node_hash_insert()
307 int ret = __bch2_btree_node_hash_insert(bc, b); in bch2_btree_node_hash_insert()
318 struct btree *b; in bch2_btree_node_update_key_early() local
325 b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true); in bch2_btree_node_update_key_early()
326 if (!IS_ERR_OR_NULL(b)) { in bch2_btree_node_update_key_early()
329 bch2_btree_node_hash_remove(&c->btree_cache, b); in bch2_btree_node_update_key_early()
331 bkey_copy(&b->key, new); in bch2_btree_node_update_key_early()
332 ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); in bch2_btree_node_update_key_early()
336 six_unlock_read(&b->c.lock); in bch2_btree_node_update_key_early()
355 static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter) in __btree_node_reclaim() argument
362 if (b->flags & ((1U << BTREE_NODE_dirty)| in __btree_node_reclaim()
366 if (btree_node_dirty(b)) in __btree_node_reclaim()
368 else if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
370 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
376 bch2_btree_node_wait_on_read(b); in __btree_node_reclaim()
377 bch2_btree_node_wait_on_write(b); in __btree_node_reclaim()
380 if (!six_trylock_intent(&b->c.lock)) { in __btree_node_reclaim()
385 if (!six_trylock_write(&b->c.lock)) { in __btree_node_reclaim()
391 if (b->flags & ((1U << BTREE_NODE_read_in_flight)| in __btree_node_reclaim()
394 if (btree_node_read_in_flight(b)) in __btree_node_reclaim()
396 else if (btree_node_write_in_flight(b)) in __btree_node_reclaim()
400 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
401 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
405 if (btree_node_noevict(b)) { in __btree_node_reclaim()
409 if (btree_node_write_blocked(b)) { in __btree_node_reclaim()
413 if (btree_node_will_make_reachable(b)) { in __btree_node_reclaim()
418 if (btree_node_dirty(b)) { in __btree_node_reclaim()
430 bch2_btree_node_write(c, b, SIX_LOCK_intent, in __btree_node_reclaim()
433 __bch2_btree_node_write(c, b, in __btree_node_reclaim()
436 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
437 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
441 if (b->hash_val && !ret) in __btree_node_reclaim()
442 trace_and_count(c, btree_cache_reap, c, b); in __btree_node_reclaim()
445 six_unlock_write(&b->c.lock); in __btree_node_reclaim()
447 six_unlock_intent(&b->c.lock); in __btree_node_reclaim()
452 static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter) in btree_node_reclaim() argument
454 return __btree_node_reclaim(c, b, false, shrinker_counter); in btree_node_reclaim()
457 static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b) in btree_node_write_and_reclaim() argument
459 return __btree_node_reclaim(c, b, true, false); in btree_node_write_and_reclaim()
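
__btree_node_reclaim() and its two wrappers embody a trylock-and-back-out discipline: take the intent lock, then the write lock, and undo everything if either trylock fails or the in-flight flags say the node is busy; on success the node is returned with both locks still held so the caller can free it. A sketch of that shape, using pthread mutexes as stand-ins for the six lock's intent and write states:

    #include <pthread.h>
    #include <stdio.h>

    struct node { pthread_mutex_t intent, write; };

    static int node_reclaim(struct node *b)
    {
        if (pthread_mutex_trylock(&b->intent))
            return -1;                         /* contended: skip this node */
        if (pthread_mutex_trylock(&b->write)) {
            pthread_mutex_unlock(&b->intent);  /* back out completely */
            return -1;
        }
        /* flag rechecks (dirty, read/write in flight) happen here; on
         * success, return 0 with BOTH locks still held for the caller */
        return 0;
    }

    int main(void)
    {
        struct node b = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER };
        if (!node_reclaim(&b)) {
            printf("reclaimed\n");             /* free the node, then unlock */
            pthread_mutex_unlock(&b.write);
            pthread_mutex_unlock(&b.intent);
        }
        return 0;
    }

Note that reclaim never waits on a contended node; the shrinker simply moves on and will see the node again on a later pass.
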
468 struct btree *b, *t; in bch2_btree_cache_scan() local
494 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_btree_cache_scan()
507 if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
508 btree_node_data_free(bc, b); in bch2_btree_cache_scan()
509 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
510 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
516 list_for_each_entry_safe(b, t, &list->list, list) { in bch2_btree_cache_scan()
519 if (btree_node_accessed(b)) { in bch2_btree_cache_scan()
520 clear_btree_node_accessed(b); in bch2_btree_cache_scan()
523 } else if (!btree_node_reclaim(c, b, true)) { in bch2_btree_cache_scan()
524 __bch2_btree_node_hash_remove(bc, b); in bch2_btree_cache_scan()
525 __btree_node_data_free(bc, b); in bch2_btree_cache_scan()
530 six_unlock_write(&b->c.lock); in bch2_btree_cache_scan()
531 six_unlock_intent(&b->c.lock); in bch2_btree_cache_scan()
536 btree_node_dirty(b) && in bch2_btree_cache_scan()
537 !btree_node_will_make_reachable(b) && in bch2_btree_cache_scan()
538 !btree_node_write_blocked(b) && in bch2_btree_cache_scan()
539 six_trylock_read(&b->c.lock)) { in bch2_btree_cache_scan()
540 list_move(&list->list, &b->list); in bch2_btree_cache_scan()
542 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_cache_scan()
543 six_unlock_read(&b->c.lock); in bch2_btree_cache_scan()
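
bch2_btree_cache_scan() uses a second-chance policy: a node with its accessed bit set survives one pass (the bit is cleared), and only a node untouched since the last scan is handed to reclaim. A sketch over a toy array instead of the live LRU list:

    #include <stdbool.h>
    #include <stdio.h>

    struct node { bool accessed, freed; };

    static unsigned scan(struct node *v, unsigned n, unsigned want)
    {
        unsigned freed = 0;
        for (unsigned i = 0; i < n && freed < want; i++) {
            if (v[i].freed)
                continue;
            if (v[i].accessed)
                v[i].accessed = false;       /* spare it this round */
            else
                v[i].freed = true, freed++;  /* cold since last pass: reclaim */
        }
        return freed;
    }

    int main(void)
    {
        struct node v[3] = { { .accessed = true }, { 0 }, { .accessed = true } };
        unsigned p1 = scan(v, 3, 3);         /* frees only the cold node */
        unsigned p2 = scan(v, 3, 3);         /* frees the rest */
        printf("pass1 freed %u, pass2 freed %u\n", p1, p2);  /* 1, 2 */
        return 0;
    }
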
579 struct btree *b, *t; in bch2_fs_btree_cache_exit() local
597 if (r->b) in bch2_fs_btree_cache_exit()
598 list_add(&r->b->list, &bc->live[0].list); in bch2_fs_btree_cache_exit()
601 list_for_each_entry_safe(b, t, &bc->live[1].list, list) in bch2_fs_btree_cache_exit()
602 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
603 list_for_each_entry_safe(b, t, &bc->live[0].list, list) in bch2_fs_btree_cache_exit()
604 bch2_btree_node_hash_remove(bc, b); in bch2_fs_btree_cache_exit()
606 list_for_each_entry_safe(b, t, &bc->freeable, list) { in bch2_fs_btree_cache_exit()
607 BUG_ON(btree_node_read_in_flight(b) || in bch2_fs_btree_cache_exit()
608 btree_node_write_in_flight(b)); in bch2_fs_btree_cache_exit()
610 btree_node_data_free(bc, b); in bch2_fs_btree_cache_exit()
618 list_for_each_entry_safe(b, t, &bc->freed_nonpcpu, list) { in bch2_fs_btree_cache_exit()
619 list_del(&b->list); in bch2_fs_btree_cache_exit()
620 six_lock_exit(&b->c.lock); in bch2_fs_btree_cache_exit()
621 kfree(b); in bch2_fs_btree_cache_exit()
751 struct btree *b; in btree_node_cannibalize() local
754 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
755 if (!btree_node_reclaim(c, b, false)) in btree_node_cannibalize()
756 return b; in btree_node_cannibalize()
760 list_for_each_entry_reverse(b, &bc->live[i].list, list) in btree_node_cannibalize()
761 if (!btree_node_write_and_reclaim(c, b)) in btree_node_cannibalize()
762 return b; in btree_node_cannibalize()
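
btree_node_cannibalize() makes two passes over the live lists, coldest nodes first (hence the reverse iteration): a cheap pass that only takes immediately reclaimable nodes, then an expensive pass that writes a dirty node back before reclaiming it. A sketch with stub reclaim paths (reclaim() and write_and_reclaim() are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct node { bool dirty; };

    static bool reclaim(struct node *n)           { return !n->dirty; }
    static bool write_and_reclaim(struct node *n) { n->dirty = false; return true; }

    static struct node *cannibalize(struct node *v, unsigned n)
    {
        /* pass 1: cheap, take only clean nodes */
        for (unsigned i = n; i-- > 0; )           /* reverse = coldest first */
            if (reclaim(&v[i]))
                return &v[i];
        /* pass 2: expensive, write back then reclaim */
        for (unsigned i = n; i-- > 0; )
            if (write_and_reclaim(&v[i]))
                return &v[i];
        return NULL;
    }

    int main(void)
    {
        struct node v[2] = { { .dirty = true }, { .dirty = true } };
        printf("got node: %s\n", cannibalize(v, 2) ? "yes" : "no");
        return 0;
    }
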
780 struct btree *b, *b2; in bch2_btree_node_mem_alloc() local
789 list_for_each_entry(b, freed, list) in bch2_btree_node_mem_alloc()
790 if (!btree_node_reclaim(c, b, false)) { in bch2_btree_node_mem_alloc()
791 list_del_init(&b->list); in bch2_btree_node_mem_alloc()
795 b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); in bch2_btree_node_mem_alloc()
796 if (!b) { in bch2_btree_node_mem_alloc()
799 b = __btree_node_mem_alloc(c, GFP_KERNEL); in bch2_btree_node_mem_alloc()
800 if (!b) in bch2_btree_node_mem_alloc()
805 bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0); in bch2_btree_node_mem_alloc()
807 BUG_ON(!six_trylock_intent(&b->c.lock)); in bch2_btree_node_mem_alloc()
808 BUG_ON(!six_trylock_write(&b->c.lock)); in bch2_btree_node_mem_alloc()
817 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
818 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
832 if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { in bch2_btree_node_mem_alloc()
834 if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) in bch2_btree_node_mem_alloc()
839 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
840 BUG_ON(btree_node_hashed(b)); in bch2_btree_node_mem_alloc()
841 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_mem_alloc()
842 BUG_ON(btree_node_write_in_flight(b)); in bch2_btree_node_mem_alloc()
844 b->flags = 0; in bch2_btree_node_mem_alloc()
845 b->written = 0; in bch2_btree_node_mem_alloc()
846 b->nsets = 0; in bch2_btree_node_mem_alloc()
847 b->sib_u64s[0] = 0; in bch2_btree_node_mem_alloc()
848 b->sib_u64s[1] = 0; in bch2_btree_node_mem_alloc()
849 b->whiteout_u64s = 0; in bch2_btree_node_mem_alloc()
850 bch2_btree_keys_init(b); in bch2_btree_node_mem_alloc()
851 set_btree_node_accessed(b); in bch2_btree_node_mem_alloc()
858 bch2_btree_node_to_freelist(c, b); in bch2_btree_node_mem_alloc()
862 return b; in bch2_btree_node_mem_alloc()
872 if (b) { in bch2_btree_node_mem_alloc()
873 swap(b->data, b2->data); in bch2_btree_node_mem_alloc()
874 swap(b->aux_data, b2->aux_data); in bch2_btree_node_mem_alloc()
879 b = b2; in bch2_btree_node_mem_alloc()
882 BUG_ON(!list_empty(&b->list)); in bch2_btree_node_mem_alloc()
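
bch2_btree_node_mem_alloc() tries a non-blocking allocation first (GFP_NOWAIT|__GFP_NOWARN) while holding the cache lock, and only on failure drops the lock and retries with a blocking GFP_KERNEL allocation. A sketch of that fallback (try_alloc() is hypothetical; userspace malloc stands in for both GFP modes):

    #include <stdio.h>
    #include <stdlib.h>

    static void *try_alloc(size_t n, int may_block)
    {
        (void)may_block;        /* userspace malloc always "may block" */
        return malloc(n);
    }

    static void *alloc_node(size_t n)
    {
        void *p = try_alloc(n, 0);          /* GFP_NOWAIT path, lock held */
        if (!p) {
            /* the original drops bc->lock here before sleeping */
            p = try_alloc(n, 1);            /* GFP_KERNEL path, may sleep */
        }
        return p;
    }

    int main(void)
    {
        void *p = alloc_node(64);
        printf("%s\n", p ? "ok" : "nomem");
        free(p);
        return 0;
    }
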
904 struct btree *b; in bch2_btree_node_fill() local
939 b = bch2_btree_node_mem_alloc(trans, level != 0); in bch2_btree_node_fill()
941 if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) { in bch2_btree_node_fill()
943 return b; in bch2_btree_node_fill()
950 if (IS_ERR(b)) in bch2_btree_node_fill()
951 return b; in bch2_btree_node_fill()
953 bkey_copy(&b->key, k); in bch2_btree_node_fill()
954 if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { in bch2_btree_node_fill()
958 b->hash_val = 0; in bch2_btree_node_fill()
961 __bch2_btree_node_to_freelist(bc, b); in bch2_btree_node_fill()
964 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
965 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
969 set_btree_node_read_in_flight(b); in bch2_btree_node_fill()
970 six_unlock_write(&b->c.lock); in bch2_btree_node_fill()
973 u32 seq = six_lock_seq(&b->c.lock); in bch2_btree_node_fill()
976 six_unlock_intent(&b->c.lock); in bch2_btree_node_fill()
979 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
988 if (!six_relock_type(&b->c.lock, lock_type, seq)) in bch2_btree_node_fill()
989 b = NULL; in bch2_btree_node_fill()
991 bch2_btree_node_read(trans, b, sync); in bch2_btree_node_fill()
993 six_lock_downgrade(&b->c.lock); in bch2_btree_node_fill()
996 return b; in bch2_btree_node_fill()
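
bch2_btree_node_fill() records six_lock_seq() before dropping the lock across the node read, then relocks with six_relock_type() only if the sequence is unchanged, meaning no writer got in; otherwise b is set to NULL and the caller retries. A sketch with a plain counter loosely standing in for the six-lock sequence (not the real six API):

    #include <stdint.h>
    #include <stdio.h>

    struct lock { uint32_t seq; int held; };

    static uint32_t lock_seq(const struct lock *l) { return l->seq; }
    static void unlock(struct lock *l)             { l->held = 0; }
    static void writer_cycle(struct lock *l)       { l->seq++; }  /* a writer came and went */

    static int relock(struct lock *l, uint32_t seq)
    {
        if (l->seq != seq)
            return 0;          /* state may have changed: caller retries from scratch */
        l->held = 1;
        return 1;
    }

    int main(void)
    {
        struct lock l = { 0, 1 };
        uint32_t seq = lock_seq(&l);
        unlock(&l);            /* drop the lock across the node read */
        writer_cycle(&l);      /* simulate interference while unlocked */
        printf("relock: %d\n", relock(&l, seq));   /* 0: b = NULL upstream */
        return 0;
    }
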
999 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) in btree_bad_header() argument
1010 bch2_btree_id_str(b->c.btree_id), b->c.level); in btree_bad_header()
1011 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); in btree_bad_header()
1015 bch2_btree_id_str(BTREE_NODE_ID(b->data)), in btree_bad_header()
1016 BTREE_NODE_LEVEL(b->data)); in btree_bad_header()
1017 bch2_bpos_to_text(&buf, b->data->min_key); in btree_bad_header()
1020 bch2_bpos_to_text(&buf, b->data->max_key); in btree_bad_header()
1027 static inline void btree_check_header(struct bch_fs *c, struct btree *b) in btree_check_header() argument
1029 if (b->c.btree_id != BTREE_NODE_ID(b->data) || in btree_check_header()
1030 b->c.level != BTREE_NODE_LEVEL(b->data) || in btree_check_header()
1031 !bpos_eq(b->data->max_key, b->key.k.p) || in btree_check_header()
1032 (b->key.k.type == KEY_TYPE_btree_ptr_v2 && in btree_check_header()
1033 !bpos_eq(b->data->min_key, in btree_check_header()
1034 bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) in btree_check_header()
1035 btree_bad_header(c, b); in btree_check_header()
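
btree_check_header() cross-checks the identity cached in the in-memory node (btree ID, level, max/min key derived from the pointer key) against the on-disk node header, calling btree_bad_header() on any mismatch. A sketch with scalar fields standing in for bpos/bkey:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct header { unsigned btree_id, level; uint64_t max_key; };
    struct node   { unsigned btree_id, level; uint64_t key_p; struct header *data; };

    static bool header_ok(const struct node *b)
    {
        return b->btree_id == b->data->btree_id &&
               b->level    == b->data->level &&
               b->data->max_key == b->key_p;
    }

    int main(void)
    {
        struct header h = { 1, 0, 100 };
        struct node b = { 1, 0, 100, &h };
        printf("header ok: %d\n", header_ok(&b));   /* 1 */
        h.level = 2;                                /* simulate a mismatch */
        printf("header ok: %d\n", header_ok(&b));   /* 0: btree_bad_header() */
        return 0;
    }
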
1045 struct btree *b; in __bch2_btree_node_get() local
1051 b = btree_cache_find(bc, k); in __bch2_btree_node_get()
1052 if (unlikely(!b)) { in __bch2_btree_node_get()
1058 b = bch2_btree_node_fill(trans, path, k, path->btree_id, in __bch2_btree_node_get()
1063 if (!b) in __bch2_btree_node_get()
1066 if (IS_ERR(b)) in __bch2_btree_node_get()
1067 return b; in __bch2_btree_node_get()
1072 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in __bch2_btree_node_get()
1078 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in __bch2_btree_node_get()
1079 b->c.level != level || in __bch2_btree_node_get()
1081 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1090 if (!btree_node_accessed(b)) in __bch2_btree_node_get()
1091 set_btree_node_accessed(b); in __bch2_btree_node_get()
1094 if (unlikely(btree_node_read_in_flight(b))) { in __bch2_btree_node_get()
1095 u32 seq = six_lock_seq(&b->c.lock); in __bch2_btree_node_get()
1097 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1101 bch2_btree_node_wait_on_read(b); in __bch2_btree_node_get()
1111 if (!six_relock_type(&b->c.lock, lock_type, seq)) in __bch2_btree_node_get()
1119 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1124 prefetch(b->aux_data); in __bch2_btree_node_get()
1126 for_each_bset(b, t) { in __bch2_btree_node_get()
1127 void *p = (u64 *) b->aux_data + t->aux_data_offset; in __bch2_btree_node_get()
1134 if (unlikely(btree_node_read_error(b))) { in __bch2_btree_node_get()
1135 six_unlock_type(&b->c.lock, lock_type); in __bch2_btree_node_get()
1139 EBUG_ON(b->c.btree_id != path->btree_id); in __bch2_btree_node_get()
1140 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in __bch2_btree_node_get()
1141 btree_check_header(c, b); in __bch2_btree_node_get()
1143 return b; in __bch2_btree_node_get()
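
__bch2_btree_node_get() must re-verify a node's identity after locking it: between the hash lookup and acquiring the lock, the node may have been evicted and reused for a different position, so a changed hash_val or level means unlock and retry the lookup. A sketch of that lookup-lock-validate loop (the one-slot cache and no-op lock helpers are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    struct node { uint64_t hash_val; };

    static struct node slot = { 7 };    /* hypothetical one-slot "cache" */
    static struct node *cache_find(uint64_t h) { return slot.hash_val == h ? &slot : NULL; }
    static void lock_node(struct node *n)   { (void)n; /* btree_node_lock() here */ }
    static void unlock_node(struct node *n) { (void)n; }

    static struct node *node_get(uint64_t want)
    {
        for (;;) {
            struct node *n = cache_find(want);
            if (!n)
                return NULL;            /* caller would fill + read the node */
            lock_node(n);
            /* identity may have changed while we slept on the lock */
            if (n->hash_val != want) {
                unlock_node(n);
                continue;               /* retry the lookup */
            }
            return n;                   /* locked and verified */
        }
    }

    int main(void)
    {
        printf("%s\n", node_get(7) ? "hit" : "miss");
        return 0;
    }
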
1168 struct btree *b; in bch2_btree_node_get() local
1173 b = btree_node_mem_ptr(k); in bch2_btree_node_get()
1181 !b || in bch2_btree_node_get()
1182 b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_get()
1188 ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); in bch2_btree_node_get()
1194 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get()
1195 b->c.level != level || in bch2_btree_node_get()
1197 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1205 if (unlikely(btree_node_read_in_flight(b))) { in bch2_btree_node_get()
1206 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1210 prefetch(b->aux_data); in bch2_btree_node_get()
1212 for_each_bset(b, t) { in bch2_btree_node_get()
1213 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get()
1221 if (!btree_node_accessed(b)) in bch2_btree_node_get()
1222 set_btree_node_accessed(b); in bch2_btree_node_get()
1224 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get()
1225 six_unlock_type(&b->c.lock, lock_type); in bch2_btree_node_get()
1229 EBUG_ON(b->c.btree_id != path->btree_id); in bch2_btree_node_get()
1230 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get()
1231 btree_check_header(c, b); in bch2_btree_node_get()
1233 return b; in bch2_btree_node_get()
1244 struct btree *b; in bch2_btree_node_get_noiter() local
1250 b = btree_node_mem_ptr(k); in bch2_btree_node_get_noiter()
1251 if (b) in bch2_btree_node_get_noiter()
1255 b = btree_cache_find(bc, k); in bch2_btree_node_get_noiter()
1256 if (unlikely(!b)) { in bch2_btree_node_get_noiter()
1260 b = bch2_btree_node_fill(trans, NULL, k, btree_id, in bch2_btree_node_get_noiter()
1264 if (!b) in bch2_btree_node_get_noiter()
1267 if (IS_ERR(b) && in bch2_btree_node_get_noiter()
1271 if (IS_ERR(b)) in bch2_btree_node_get_noiter()
1275 ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_); in bch2_btree_node_get_noiter()
1281 if (unlikely(b->hash_val != btree_ptr_hash_val(k) || in bch2_btree_node_get_noiter()
1282 b->c.btree_id != btree_id || in bch2_btree_node_get_noiter()
1283 b->c.level != level)) { in bch2_btree_node_get_noiter()
1284 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1290 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_get_noiter()
1292 prefetch(b->aux_data); in bch2_btree_node_get_noiter()
1294 for_each_bset(b, t) { in bch2_btree_node_get_noiter()
1295 void *p = (u64 *) b->aux_data + t->aux_data_offset; in bch2_btree_node_get_noiter()
1303 if (!btree_node_accessed(b)) in bch2_btree_node_get_noiter()
1304 set_btree_node_accessed(b); in bch2_btree_node_get_noiter()
1306 if (unlikely(btree_node_read_error(b))) { in bch2_btree_node_get_noiter()
1307 six_unlock_read(&b->c.lock); in bch2_btree_node_get_noiter()
1308 b = ERR_PTR(-BCH_ERR_btree_node_read_error); in bch2_btree_node_get_noiter()
1312 EBUG_ON(b->c.btree_id != btree_id); in bch2_btree_node_get_noiter()
1313 EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); in bch2_btree_node_get_noiter()
1314 btree_check_header(c, b); in bch2_btree_node_get_noiter()
1317 return b; in bch2_btree_node_get_noiter()
1331 struct btree *b = btree_cache_find(bc, k); in bch2_btree_node_prefetch() local
1332 if (b) in bch2_btree_node_prefetch()
1335 b = bch2_btree_node_fill(trans, path, k, btree_id, in bch2_btree_node_prefetch()
1337 int ret = PTR_ERR_OR_ZERO(b); in bch2_btree_node_prefetch()
1340 if (b) in bch2_btree_node_prefetch()
1341 six_unlock_read(&b->c.lock); in bch2_btree_node_prefetch()
1349 struct btree *b; in bch2_btree_node_evict() local
1351 b = btree_cache_find(bc, k); in bch2_btree_node_evict()
1352 if (!b) in bch2_btree_node_evict()
1355 BUG_ON(b == btree_node_root(trans->c, b)); in bch2_btree_node_evict()
1362 __bch2_btree_node_wait_on_read(b); in bch2_btree_node_evict()
1363 __bch2_btree_node_wait_on_write(b); in bch2_btree_node_evict()
1365 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); in bch2_btree_node_evict()
1366 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); in bch2_btree_node_evict()
1367 if (unlikely(b->hash_val != btree_ptr_hash_val(k))) in bch2_btree_node_evict()
1370 if (btree_node_dirty(b)) { in bch2_btree_node_evict()
1371 __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); in bch2_btree_node_evict()
1372 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1373 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1377 BUG_ON(btree_node_dirty(b)); in bch2_btree_node_evict()
1380 bch2_btree_node_hash_remove(bc, b); in bch2_btree_node_evict()
1381 btree_node_data_free(bc, b); in bch2_btree_node_evict()
1384 six_unlock_write(&b->c.lock); in bch2_btree_node_evict()
1385 six_unlock_intent(&b->c.lock); in bch2_btree_node_evict()
1401 void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_pos_to_text() argument
1404 bch2_btree_id_str(b->c.btree_id), in bch2_btree_pos_to_text()
1405 b->c.level, in bch2_btree_pos_to_text()
1406 bch2_btree_id_root(c, b->c.btree_id)->level); in bch2_btree_pos_to_text()
1407 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_pos_to_text()
1410 void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) in bch2_btree_node_to_text() argument
1416 bch2_btree_keys_stats(b, &stats); in bch2_btree_node_to_text()
1418 prt_printf(out, "l %u ", b->c.level); in bch2_btree_node_to_text()
1419 bch2_bpos_to_text(out, b->data->min_key); in bch2_btree_node_to_text()
1421 bch2_bpos_to_text(out, b->data->max_key); in bch2_btree_node_to_text()
1424 bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key)); in bch2_btree_node_to_text()
1429 bch2_bkey_format_to_text(out, &b->format); in bch2_btree_node_to_text()
1439 b->unpack_fn_len, in bch2_btree_node_to_text()
1440 b->nr.live_u64s * sizeof(u64), in bch2_btree_node_to_text()
1441 btree_buf_bytes(b) - sizeof(struct btree_node), in bch2_btree_node_to_text()
1442 b->nr.live_u64s * 100 / btree_max_u64s(c), in bch2_btree_node_to_text()
1443 b->sib_u64s[0], in bch2_btree_node_to_text()
1444 b->sib_u64s[1], in bch2_btree_node_to_text()
1446 b->nr.packed_keys, in bch2_btree_node_to_text()
1447 b->nr.unpacked_keys, in bch2_btree_node_to_text()