Lines Matching full:ca

73 for_each_member_device_rcu(c, ca, NULL) in bch2_reset_alloc_cursors()
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor)); in bch2_reset_alloc_cursors()
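The two matches above cover essentially the whole helper; a minimal sketch of it, assuming the member-device iterator declares @ca itself and must run under rcu_read_lock():

void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	/* Clear every device's allocation cursors so the next scan
	 * starts from the first bucket again: */
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
	rcu_read_unlock();
}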
103 struct bch_dev *ca = ob_dev(c, ob); in __bch2_open_bucket_put() local
126 ca->nr_open_buckets--; in __bch2_open_bucket_put()
179 long bch2_bucket_alloc_new_fs(struct bch_dev *ca) in bch2_bucket_alloc_new_fs() argument
181 while (ca->new_fs_bucket_idx < ca->mi.nbuckets) { in bch2_bucket_alloc_new_fs()
182 u64 b = ca->new_fs_bucket_idx++; in bch2_bucket_alloc_new_fs()
184 if (!is_superblock_bucket(ca, b) && in bch2_bucket_alloc_new_fs()
185 (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse))) in bch2_bucket_alloc_new_fs()
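Matches 179-185 reconstruct nearly all of this early-boot allocator, which hands out buckets sequentially before the freespace btree exists; a sketch (the -1 "no bucket left" return value is an assumption):

long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		/* Skip buckets holding the superblock and buckets
		 * explicitly marked not to be used: */
		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}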
209 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, in __try_alloc_bucket() argument
218 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { in __try_alloc_bucket()
223 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { in __try_alloc_bucket()
229 c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) { in __try_alloc_bucket()
234 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) { in __try_alloc_bucket()
251 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { in __try_alloc_bucket()
262 ob->sectors_free = ca->mi.bucket_size; in __try_alloc_bucket()
263 ob->dev = ca->dev_idx; in __try_alloc_bucket()
268 ca->nr_open_buckets++; in __try_alloc_bucket()
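Taken together, the __try_alloc_bucket() matches show a chain of reject-early checks before an open bucket is handed out; a condensed sketch of just those checks (the journal-commit check at line 229, the skipped-bucket counters in @s, and the open_bucket allocation itself are elided):

/* Reject buckets that cannot be allocated right now: */
if (ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))
	return NULL;				/* marked nouse */
if (bch2_bucket_is_open(c, ca->dev_idx, bucket))
	return NULL;				/* already open */
if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket)))
	return NULL;				/* nocow-locked */

/* Otherwise fill in and hand out an open_bucket: */
ob->sectors_free = ca->mi.bucket_size;
ob->dev		 = ca->dev_idx;
ca->nr_open_buckets++;
return ob;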
278 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, in try_alloc_bucket() argument
295 if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) { in try_alloc_bucket()
298 ca->mi.first_bucket, ca->mi.nbuckets); in try_alloc_bucket()
306 BTREE_ID_alloc, POS(ca->dev_idx, b), in try_alloc_bucket()
349 ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1, in try_alloc_bucket()
368 ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl); in try_alloc_bucket()
382 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
383 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
387 struct bch_dev *ca, in bch2_bucket_alloc_early() argument
395 u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx); in bch2_bucket_alloc_early()
396 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; in bch2_bucket_alloc_early()
411 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor), in bch2_bucket_alloc_early()
415 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) in bch2_bucket_alloc_early()
418 if (ca->new_fs_bucket_idx && in bch2_bucket_alloc_early()
419 is_superblock_bucket(ca, k.k->p.offset)) in bch2_bucket_alloc_early()
423 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_early()
424 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_early()
426 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_early()
429 bucket = sector_to_bucket(ca, in bch2_bucket_alloc_early()
430 round_up(bucket_to_sector(ca, bucket) + 1, in bch2_bucket_alloc_early()
431 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_early()
432 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket)); in bch2_bucket_alloc_early()
455 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl); in bch2_bucket_alloc_early()
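The block at lines 423-432 (and its twin at 507-515 in the freelist matches below) implements a skip-ahead when a bucket's btree-bitmap state doesn't match what this allocation wants: rather than walking bucket by bucket, the iterator jumps to the next bitmap granule. A sketch assembled from those matches, with the conditions not visible in them left as elisions:

if (/* allocation cares about the btree bitmap, and */
    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
			bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
	if (/* a marked bucket is required, and */
	    bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
		break;	/* assumption: past the tracked range, give up */

	/* Jump to the first bucket of the next bitmap granule: */
	bucket = sector_to_bucket(ca,
			round_up(bucket_to_sector(ca, bucket) + 1,
				 1ULL << ca->mi.btree_bitmap_shift));
	bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
	continue;
}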
480 struct bch_dev *ca, in bch2_bucket_alloc_freelist() argument
488 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; in bch2_bucket_alloc_freelist()
489 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor)); in bch2_bucket_alloc_freelist()
493 BUG_ON(ca->new_fs_bucket_idx); in bch2_bucket_alloc_freelist()
496 POS(ca->dev_idx, alloc_cursor), 0, k, ret) { in bch2_bucket_alloc_freelist()
497 if (k.k->p.inode != ca->dev_idx) in bch2_bucket_alloc_freelist()
507 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_freelist()
508 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_freelist()
510 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_freelist()
513 bucket = sector_to_bucket(ca, in bch2_bucket_alloc_freelist()
514 round_up(bucket_to_sector(ca, bucket) + 1, in bch2_bucket_alloc_freelist()
515 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_freelist()
520 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor)); in bch2_bucket_alloc_freelist()
525 ob = try_alloc_bucket(trans, ca, watermark, in bch2_bucket_alloc_freelist()
542 if (!ob && alloc_start > ca->mi.first_bucket) { in bch2_bucket_alloc_freelist()
543 alloc_cursor = alloc_start = ca->mi.first_bucket; in bch2_bucket_alloc_freelist()
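Lines 542-543 show the freelist allocator's wrap-around: if nothing was found from the saved cursor onwards, it retries once from the device's first bucket. A sketch (the retry label is an assumption):

if (!ob && alloc_start > ca->mi.first_bucket) {
	/* Nothing past the cursor: wrap to the start of the device
	 * and rescan the range that was skipped. */
	alloc_cursor = alloc_start = ca->mi.first_bucket;
	goto again;	/* label name assumed */
}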
552 static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca, in trace_bucket_alloc2() argument
564 prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx); in trace_bucket_alloc2()
569 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark)); in trace_bucket_alloc2()
594 * @ca: device to allocate from
603 struct bch_dev *ca, in bch2_bucket_alloc_trans() argument
612 bool freespace = READ_ONCE(ca->mi.freespace_initialized); in bch2_bucket_alloc_trans()
619 bch2_dev_usage_read_fast(ca, usage); in bch2_bucket_alloc_trans()
620 avail = dev_buckets_free(ca, *usage, watermark); in bch2_bucket_alloc_trans()
623 bch2_dev_do_discards(ca); in bch2_bucket_alloc_trans()
628 if (should_invalidate_buckets(ca, *usage)) in bch2_bucket_alloc_trans()
629 bch2_dev_do_invalidates(ca); in bch2_bucket_alloc_trans()
648 ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl) in bch2_bucket_alloc_trans()
649 : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl); in bch2_bucket_alloc_trans()
678 trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob); in bch2_bucket_alloc_trans()
683 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, in bch2_bucket_alloc() argument
692 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark, in bch2_bucket_alloc()
720 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca, in bch2_dev_stripe_increment_inlined() argument
724 u64 *v = stripe->next_alloc + ca->dev_idx; in bch2_dev_stripe_increment_inlined()
725 u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal); in bch2_dev_stripe_increment_inlined()
741 void bch2_dev_stripe_increment(struct bch_dev *ca, in bch2_dev_stripe_increment() argument
746 bch2_dev_usage_read_fast(ca, &usage); in bch2_dev_stripe_increment()
747 bch2_dev_stripe_increment_inlined(ca, stripe, &usage); in bch2_dev_stripe_increment()
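Matches 741-747 give the whole of this wrapper, which refreshes device usage and feeds it to the inlined update; the type of the second parameter is inferred from the stripe->next_alloc use at line 724:

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}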
799 struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev); in bch2_bucket_alloc_set_trans() local
800 if (!ca) in bch2_bucket_alloc_set_trans()
803 if (!ca->mi.durability && *have_cache) { in bch2_bucket_alloc_set_trans()
804 bch2_dev_put(ca); in bch2_bucket_alloc_set_trans()
808 ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, in bch2_bucket_alloc_set_trans()
811 bch2_dev_stripe_increment_inlined(ca, stripe, &usage); in bch2_bucket_alloc_set_trans()
812 bch2_dev_put(ca); in bch2_bucket_alloc_set_trans()
905 struct bch_dev *ca = ob_dev(c, ob); in want_bucket() local
913 if (!ca->mi.durability && in want_bucket()
974 struct bch_dev *ca = ob_dev(c, ob); in bucket_alloc_set_partial() local
978 bch2_dev_usage_read_fast(ca, &usage); in bucket_alloc_set_partial()
979 avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets; in bucket_alloc_set_partial()
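Lines 974-979 show how the partial-bucket path judges whether a device still has room: free buckets at the current watermark plus buckets already parked on its partial list. A sketch of that computation (only the declarations are added):

struct bch_dev_usage usage;
u64 avail;

bch2_dev_usage_read_fast(ca, &usage);
/* Room on this device = free buckets at this watermark plus
 * buckets already held on the partial list: */
avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;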
1112 * @ca: if set, we're killing buckets for a particular device
1122 struct bch_dev *ca, bool ec) in should_drop_bucket() argument
1126 } else if (ca) { in should_drop_bucket()
1127 bool drop = ob->dev == ca->dev_idx; in should_drop_bucket()
1142 drop |= ob2->dev == ca->dev_idx; in should_drop_bucket()
1153 static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca, in bch2_writepoint_stop() argument
1162 if (should_drop_bucket(ob, c, ca, ec)) in bch2_writepoint_stop()
1170 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca, in bch2_open_buckets_stop() argument
1177 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]); in bch2_open_buckets_stop()
1179 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point); in bch2_open_buckets_stop()
1180 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point); in bch2_open_buckets_stop()
1181 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point); in bch2_open_buckets_stop()
1198 if (should_drop_bucket(ob, c, ca, ec)) { in bch2_open_buckets_stop()
1218 bch2_ec_stop_dev(c, ca); in bch2_open_buckets_stop()
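The bch2_open_buckets_stop() matches outline its shape: stop every write point that might be caching buckets on the device, drop matching open buckets, then stop erasure coding on that device. A sketch of the visible first half (the loop bound over c->write_points is an assumption):

for (unsigned i = 0; i < ARRAY_SIZE(c->write_points); i++)
	bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);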
1522 struct bch_dev *ca = ob_dev(c, ob); in bch2_ob_ptr() local
1528 .offset = bucket_to_sector(ca, ob->bucket) + in bch2_ob_ptr()
1529 ca->mi.bucket_size - in bch2_ob_ptr()
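Lines 1522-1529 show the key arithmetic in bch2_ob_ptr(): the device offset for the next write is the bucket's start sector plus the portion already consumed (bucket size minus sectors still free). A sketch of that field only (the remaining bch_extent_ptr fields are omitted):

struct bch_extent_ptr ptr = {
	.dev	= ob->dev,
	.offset	= bucket_to_sector(ca, ob->bucket) +
		  ca->mi.bucket_size -
		  ob->sectors_free,
	/* gen, cached, type etc. omitted from this sketch */
};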
1598 struct bch_dev *ca = ob_dev(c, ob); in bch2_open_bucket_to_text() local
1608 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size); in bch2_open_bucket_to_text()
1617 struct bch_dev *ca) in bch2_open_buckets_to_text() argument
1627 if (ob->valid && (!ca || ob->dev == ca->dev_idx)) in bch2_open_buckets_to_text()
1734 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca) in bch2_dev_alloc_debug_to_text() argument
1736 struct bch_fs *c = ca->fs; in bch2_dev_alloc_debug_to_text()
1737 struct bch_dev_usage stats = bch2_dev_usage_read(ca); in bch2_dev_alloc_debug_to_text()
1745 bch2_dev_usage_to_text(out, ca, &stats); in bch2_dev_alloc_debug_to_text()
1751 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i)); in bch2_dev_alloc_debug_to_text()
1759 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets); in bch2_dev_alloc_debug_to_text()
1760 prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats)); in bch2_dev_alloc_debug_to_text()
1776 for_each_online_member(c, ca) { in bch2_print_allocator_stuck()
1777 prt_printf(&buf, "Dev %u:\n", ca->dev_idx); in bch2_print_allocator_stuck()
1779 bch2_dev_alloc_debug_to_text(&buf, ca); in bch2_print_allocator_stuck()
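The final matches, at 1776-1779, are the per-device dump used when the allocator appears stuck; a sketch of that loop (line 1778 doesn't match 'ca' and is elided, and the trailing newline between devices is an assumption):

for_each_online_member(c, ca) {
	prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
	bch2_dev_alloc_debug_to_text(&buf, ca);
	prt_newline(&buf);	/* assumption: blank line between devices */
}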