Lines matching "full:ca" (whole-word identifier search for the struct bch_dev pointer "ca"); judging by the function names, all hits are in bcachefs's allocator code, fs/bcachefs/alloc_background.c. The numbers on each line below are source-file line numbers from the search tool.
349 struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL; in bch2_alloc_to_text() local
368 if (ca) in bch2_alloc_to_text()
369 prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca)); in bch2_alloc_to_text()
373 bch2_dev_put(ca); in bch2_alloc_to_text()
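
The four hits above show the standard per-bucket device-ref guard: the lookup may fail (device gone or bucket out of range), every use is NULL-checked, and the put at 373 is unconditional, which implies bch2_dev_put() tolerates NULL. A minimal sketch of the same shape, reassembled from lines 349-373 (the body between the hits is elided in the listing, so everything else here is filled in by assumption):

    struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

    /* ... decode the alloc key into a bch_alloc_v4 'a' (not shown in the hits) ... */

    if (ca)                             /* tryget can fail: device removed or bucket invalid */
        prt_printf(out, "fragmentation %llu\n",
                   alloc_lru_idx_fragmentation(*a, ca));

    bch2_dev_put(ca);                   /* unconditional, so it must be NULL-safe */
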
599 struct bch_dev *ca = NULL; in bch2_alloc_read() local
611 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
616 if (!ca) { in bch2_alloc_read()
623 for (u64 b = max_t(u64, ca->mi.first_bucket, start); in bch2_alloc_read()
624 b < min_t(u64, ca->mi.nbuckets, end); in bch2_alloc_read()
626 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; in bch2_alloc_read()
632 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
637 if (!ca) { in bch2_alloc_read()
642 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
643 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
647 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
653 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
658 bch2_dev_put(ca); in bch2_alloc_read()
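
bch2_alloc_read() walks two key types and resolves each key's device with bch2_dev_iterate(), skipping keys whose device no longer exists (lines 616, 637) and clamping bucket numbers to the device's valid range. The clamped copy loop for bucket_gens keys (lines 623-626) is worth restating on its own; a sketch, where the helper name is invented and the struct name bch_bucket_gens is assumed from context:

    /* Hypothetical helper: copy the gens covered by one bucket_gens key,
     * but only for buckets that actually exist on the device, i.e. those
     * in [ca->mi.first_bucket, ca->mi.nbuckets). */
    static void load_bucket_gens(struct bch_dev *ca, u64 start, u64 end,
                                 const struct bch_bucket_gens *g)
    {
        for (u64 b = max_t(u64, ca->mi.first_bucket, start);
             b < min_t(u64, ca->mi.nbuckets, end);
             b++)
            *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
    }
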
668 struct bch_dev *ca, in bch2_bucket_do_index() argument
715 if (ca->mi.freespace_initialized && in bch2_bucket_do_index()
774 static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca, in bch2_dev_data_type_accounting_mod() argument
782 .dev_data_type.dev = ca->dev_idx, in bch2_dev_data_type_accounting_mod()
790 int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca, in bch2_alloc_key_to_dev_counters() argument
798 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
799 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?: in bch2_alloc_key_to_dev_counters()
800 bch2_dev_data_type_accounting_mod(trans, ca, old->data_type, in bch2_alloc_key_to_dev_counters()
801 -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
805 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
808 bch2_bucket_sectors_fragmented(ca, *new) - in bch2_alloc_key_to_dev_counters()
809 bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
817 int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped, in bch2_alloc_key_to_dev_counters()
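
The two branches visible at lines 798-809 encode one idea: per-device accounting is keyed by (dev, data_type) and tracks a bucket count, a sector count, and a fragmented-sector count. When a bucket changes data type, the old type gets a (-1, -sectors, -frag) update and the new type a (+1, +sectors, +frag); when the type is unchanged, the two collapse into a single delta update. A schematic restatement (the bucket-count argument in the same-type branch is not visible in the listing and is assumed to be 0):

    if (old->data_type != new->data_type) {
        ret =   bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
                        1,  new_sectors,  bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
                bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
                        -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
    } else {
        ret =   bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
                        0,                              /* assumed: bucket count unchanged */
                        new_sectors - old_sectors,
                        bch2_bucket_sectors_fragmented(ca, *new) -
                        bch2_bucket_sectors_fragmented(ca, *old), flags);
    }
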
838 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc() local
839 if (!ca) in bch2_trigger_alloc()
879 ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?: in bch2_trigger_alloc()
880 bch2_bucket_do_index(trans, ca, new.s_c, new_a, true); in bch2_trigger_alloc()
899 old_lru = alloc_lru_idx_fragmentation(*old_a, ca); in bch2_trigger_alloc()
900 new_lru = alloc_lru_idx_fragmentation(*new_a, ca); in bch2_trigger_alloc()
918 ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx, in bch2_trigger_alloc()
925 ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags); in bch2_trigger_alloc()
966 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
986 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
990 should_invalidate_buckets(ca, bch2_dev_usage_read(ca))) in bch2_trigger_alloc()
991 bch2_dev_do_invalidates(ca); in bch2_trigger_alloc()
999 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
1010 bch2_dev_put(ca); in bch2_trigger_alloc()
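
bch2_trigger_alloc() is the hub for "ca" in this file: under the transaction it reindexes the bucket in the freespace/need-discard btrees (879-880), repositions it in the fragmentation LRU (899-900), and folds the change into cached-sector and per-device counters (918, 925); on the atomic side it updates the in-memory bucket gen (966), feeds freshly dischargeable buckets to the fast-discard path (986), and pokes the invalidate worker when cached data should be reclaimed (990-991). The state-change guards on these calls are elided in the listing; a sketch of the atomic-side kicks with those guards left as comments:

    /* Atomic side, once the new alloc key is in place (lines 986-991): */

    /* guard elided in the listing: bucket transitioned to needing discard */
    bch2_discard_one_bucket_fast(ca, new.k->p.offset);

    /* guard partly elided: reclaim cached buckets if usage warrants it */
    if (should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
        bch2_dev_do_invalidates(ca);
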
1065 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket) in next_bucket() argument
1067 if (*ca) { in next_bucket()
1068 if (bucket->offset < (*ca)->mi.first_bucket) in next_bucket()
1069 bucket->offset = (*ca)->mi.first_bucket; in next_bucket()
1071 if (bucket->offset < (*ca)->mi.nbuckets) in next_bucket()
1074 bch2_dev_put(*ca); in next_bucket()
1075 *ca = NULL; in next_bucket()
1081 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL); in next_bucket()
1082 if (*ca) { in next_bucket()
1083 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket); in next_bucket()
1084 bch2_dev_get(*ca); in next_bucket()
1088 return *ca != NULL; in next_bucket()
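
next_bucket() is a two-level cursor: clamp the offset into the current device, and when that device is exhausted, drop its ref and take a ref on the next member device. The listing covers most of the body; a reconstruction, with the elided lines (advancing bucket->inode, resetting the offset) and the RCU protection around the member walk marked as assumptions:

    static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
    {
        if (*ca) {
            if (bucket->offset < (*ca)->mi.first_bucket)
                bucket->offset = (*ca)->mi.first_bucket;    /* clamp up into valid range */

            if (bucket->offset < (*ca)->mi.nbuckets)
                return true;                                /* still inside this device */

            bch2_dev_put(*ca);
            *ca = NULL;
            bucket->inode++;                                /* assumption: step to next device */
            bucket->offset = 0;                             /* assumption */
        }

        rcu_read_lock();                                    /* assumption: walk is RCU-protected */
        *ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
        if (*ca) {
            *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
            bch2_dev_get(*ca);                              /* cursor owns a ref until exhausted */
        }
        rcu_read_unlock();

        return *ca != NULL;
    }
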
1092 struct bch_dev **ca, struct bkey *hole) in bch2_get_key_or_real_bucket_hole() argument
1101 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1106 if (!*ca || !bucket_valid(*ca, hole_start.offset)) { in bch2_get_key_or_real_bucket_hole()
1107 if (!next_bucket(c, ca, &hole_start)) in bch2_get_key_or_real_bucket_hole()
1114 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1115 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset); in bch2_get_key_or_real_bucket_hole()
1138 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key() local
1139 if (fsck_err_on(!ca, in bch2_check_alloc_key()
1144 if (!ca) in bch2_check_alloc_key()
1147 if (!ca->mi.freespace_initialized) in bch2_check_alloc_key()
1250 bch2_dev_put(ca); in bch2_check_alloc_key()
1257 struct bch_dev *ca, in bch2_check_alloc_hole_freespace() argument
1266 if (!ca->mi.freespace_initialized) in bch2_check_alloc_hole_freespace()
1452 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key() local
1453 if (!ca) { in bch2_check_bucket_gens_key()
1461 if (fsck_err_on(end <= ca->mi.first_bucket || in bch2_check_bucket_gens_key()
1462 start >= ca->mi.nbuckets, in bch2_check_bucket_gens_key()
1470 for (b = start; b < ca->mi.first_bucket; b++) in bch2_check_bucket_gens_key()
1478 for (b = ca->mi.nbuckets; b < end; b++) in bch2_check_bucket_gens_key()
1498 bch2_dev_put(ca); in bch2_check_bucket_gens_key()
1507 struct bch_dev *ca = NULL; in bch2_check_alloc_info() local
1526 k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole); in bch2_check_alloc_info()
1547 ret = bch2_check_alloc_hole_freespace(trans, ca, in bch2_check_alloc_info()
1575 bch2_dev_put(ca); in bch2_check_alloc_info()
1576 ca = NULL; in bch2_check_alloc_info()
1647 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref() local
1648 if (!ca) in bch2_check_alloc_to_lru_ref()
1653 u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); in bch2_check_alloc_to_lru_ref()
1691 bch2_dev_put(ca); in bch2_check_alloc_to_lru_ref()
1714 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress) in discard_in_flight_add() argument
1718 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1719 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_add()
1725 ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) { in discard_in_flight_add()
1730 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1734 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket) in discard_in_flight_remove() argument
1736 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
1737 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_remove()
1740 darray_remove_item(&ca->discard_buckets_in_flight, i); in discard_in_flight_remove()
1745 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
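
discard_in_flight_add()/_remove() implement a small mutex-protected set over a darray: add scans for a duplicate before pushing, remove scans and deletes. This is what lets a bucket be handed to the discard machinery from multiple paths (the trigger's fast path at line 986 and the btree scan at 1830) without being discarded twice. A reconstruction of the add side; the duplicate test, the error it returns, and the struct field names are not in the listing and are assumptions:

    static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
    {
        int ret = 0;

        mutex_lock(&ca->discard_buckets_in_flight_lock);
        darray_for_each(ca->discard_buckets_in_flight, i)
            if (i->bucket == bucket) {          /* assumption: dedup on bucket number */
                ret = -EEXIST;                  /* assumption: actual error code differs */
                goto out;
            }

        ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
            .in_progress    = in_progress,      /* assumption: field names */
            .bucket         = bucket,
        }));
    out:
        mutex_unlock(&ca->discard_buckets_in_flight_lock);
        return ret;
    }
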
1757 struct bch_dev *ca, in bch2_discard_one_bucket() argument
1830 if (discard_in_flight_add(ca, iter.pos.offset, true)) in bch2_discard_one_bucket()
1836 ca->mi.discard && !c->opts.nochanges) { in bch2_discard_one_bucket()
1842 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_discard_one_bucket()
1843 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1844 ca->mi.bucket_size, in bch2_discard_one_bucket()
1868 discard_in_flight_remove(ca, iter.pos.offset); in bch2_discard_one_bucket()
1877 struct bch_dev *ca = container_of(work, struct bch_dev, discard_work); in bch2_do_discards_work() local
1878 struct bch_fs *c = ca->fs; in bch2_do_discards_work()
1891 POS(ca->dev_idx, 0), in bch2_do_discards_work()
1892 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1893 bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s))); in bch2_do_discards_work()
1898 percpu_ref_put(&ca->io_ref); in bch2_do_discards_work()
1902 void bch2_dev_do_discards(struct bch_dev *ca) in bch2_dev_do_discards() argument
1904 struct bch_fs *c = ca->fs; in bch2_dev_do_discards()
1909 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_discards()
1912 if (queue_work(c->write_ref_wq, &ca->discard_work)) in bch2_dev_do_discards()
1915 percpu_ref_put(&ca->io_ref); in bch2_dev_do_discards()
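
bch2_dev_do_discards() (1902-1915) shows the kick pattern used by all three background workers in this file: take an I/O ref on the device, try to queue the per-device work item, and drop the ref only if queue_work() reports the work already pending, because a successful queue transfers ref ownership to the worker (which drops it at 1898). A sketch under a hypothetical name; any filesystem-level write ref taken on the elided lines is omitted here:

    void example_dev_kick_discards(struct bch_dev *ca)      /* hypothetical name */
    {
        struct bch_fs *c = ca->fs;

        if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
            return;                             /* device not writable right now */

        if (queue_work(c->write_ref_wq, &ca->discard_work))
            return;                             /* queued: the worker now owns the ioref */

        percpu_ref_put(&ca->io_ref);            /* already pending: drop our ref */
    }
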
1922 for_each_member_device(c, ca) in bch2_do_discards()
1923 bch2_dev_do_discards(ca); in bch2_do_discards()
1952 struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work); in bch2_do_discards_fast_work() local
1953 struct bch_fs *c = ca->fs; in bch2_do_discards_fast_work()
1959 mutex_lock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
1960 darray_for_each(ca->discard_buckets_in_flight, i) { in bch2_do_discards_fast_work()
1969 mutex_unlock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
1974 if (ca->mi.discard && !c->opts.nochanges) in bch2_do_discards_fast_work()
1975 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_do_discards_fast_work()
1976 bucket_to_sector(ca, bucket), in bch2_do_discards_fast_work()
1977 ca->mi.bucket_size, in bch2_do_discards_fast_work()
1983 bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket))); in bch2_do_discards_fast_work()
1986 discard_in_flight_remove(ca, bucket); in bch2_do_discards_fast_work()
1992 percpu_ref_put(&ca->io_ref); in bch2_do_discards_fast_work()
1996 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) in bch2_discard_one_bucket_fast() argument
1998 struct bch_fs *c = ca->fs; in bch2_discard_one_bucket_fast()
2000 if (discard_in_flight_add(ca, bucket, false)) in bch2_discard_one_bucket_fast()
2006 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_discard_one_bucket_fast()
2009 if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) in bch2_discard_one_bucket_fast()
2012 percpu_ref_put(&ca->io_ref); in bch2_discard_one_bucket_fast()
2100 struct bch_dev *ca, bool *wrapped) in next_lru_key() argument
2104 k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2106 bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); in next_lru_key()
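
next_lru_key() gives the invalidate worker a circular scan over one device's slice of the LRU btree: peek up to the end of the device's LRU keyspace, and on first running out, wrap back to position (dev, 0, 0) so the scan resumes from the oldest entries. A reconstruction; the wrap bookkeeping between the two visible lines is assumed:

    static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
                                        struct bch_dev *ca, bool *wrapped)
    {
        struct bkey_s_c k;
    again:
        k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
        if (!k.k && !*wrapped) {                /* assumption: wrap at most once */
            bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
            *wrapped = true;
            goto again;
        }
        return k;
    }
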
2116 struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work); in bch2_do_invalidates_work() local
2117 struct bch_fs *c = ca->fs; in bch2_do_invalidates_work()
2126 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); in bch2_do_invalidates_work()
2131 lru_pos(ca->dev_idx, 0, in bch2_do_invalidates_work()
2138 struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); in bch2_do_invalidates_work()
2157 percpu_ref_put(&ca->io_ref); in bch2_do_invalidates_work()
2161 void bch2_dev_do_invalidates(struct bch_dev *ca) in bch2_dev_do_invalidates() argument
2163 struct bch_fs *c = ca->fs; in bch2_dev_do_invalidates()
2168 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_invalidates()
2171 if (queue_work(c->write_ref_wq, &ca->invalidate_work)) in bch2_dev_do_invalidates()
2174 percpu_ref_put(&ca->io_ref); in bch2_dev_do_invalidates()
2181 for_each_member_device(c, ca) in bch2_do_invalidates()
2182 bch2_dev_do_invalidates(ca); in bch2_do_invalidates()
2185 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, in bch2_dev_freespace_init() argument
2192 struct bpos end = POS(ca->dev_idx, bucket_end); in bch2_dev_freespace_init()
2198 BUG_ON(bucket_end > ca->mi.nbuckets); in bch2_dev_freespace_init()
2201 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)), in bch2_dev_freespace_init()
2204 * Scan the alloc btree for every bucket on @ca, and add buckets to the in bch2_dev_freespace_init()
2209 bch_info(ca, "%s: currently at %llu/%llu", in bch2_dev_freespace_init()
2210 __func__, iter.pos.offset, ca->mi.nbuckets); in bch2_dev_freespace_init()
2234 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?: in bch2_dev_freespace_init()
2273 bch_err_msg(ca, ret, "initializing free space"); in bch2_dev_freespace_init()
2278 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_dev_freespace_init()
2295 for_each_member_device(c, ca) { in bch2_fs_freespace_init()
2296 if (ca->mi.freespace_initialized) in bch2_fs_freespace_init()
2304 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); in bch2_fs_freespace_init()
2306 bch2_dev_put(ca); in bch2_fs_freespace_init()
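
Note the manual ref drop at 2306: for_each_member_device() takes a ref on each device it yields, so breaking out of the loop early requires an explicit bch2_dev_put(). The fs-level pass, reconstructed with the error handling around the early exit assumed:

    for_each_member_device(c, ca) {
        if (ca->mi.freespace_initialized)
            continue;                           /* already done for this device */

        ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
        if (ret) {
            bch2_dev_put(ca);                   /* drop the loop macro's ref on early exit */
            break;                              /* assumption */
        }
    }
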
2324 int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_remove_alloc() argument
2326 struct bpos start = POS(ca->dev_idx, 0); in bch2_dev_remove_alloc()
2327 struct bpos end = POS(ca->dev_idx, U64_MAX); in bch2_dev_remove_alloc()
2334 ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?: in bch2_dev_remove_alloc()
2347 bch2_dev_usage_remove(c, ca->dev_idx); in bch2_dev_remove_alloc()
2348 bch_err_msg(ca, ret, "removing dev alloc info"); in bch2_dev_remove_alloc()
2398 for_each_online_member(c, ca) { in bch2_recalc_capacity()
2399 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; in bch2_recalc_capacity()
2406 for_each_rw_member(c, ca) { in bch2_recalc_capacity()
2426 dev_reserve += ca->nr_btree_reserve * 2; in bch2_recalc_capacity()
2427 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ in bch2_recalc_capacity()
2433 dev_reserve *= ca->mi.bucket_size; in bch2_recalc_capacity()
2435 capacity += bucket_to_sector(ca, ca->mi.nbuckets - in bch2_recalc_capacity()
2436 ca->mi.first_bucket); in bch2_recalc_capacity()
2441 ca->mi.bucket_size); in bch2_recalc_capacity()
2465 for_each_rw_member(c, ca) in bch2_min_rw_member_capacity()
2466 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); in bch2_min_rw_member_capacity()
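
The reserve arithmetic at 2426-2436: per writable device, reserve twice the btree-node reserve plus 1/64 (about 1.6%) of its buckets for copygc, convert from buckets to sectors, and count the bucket range toward usable capacity. Worked numbers under made-up values (nr_btree_reserve, bucket count, and bucket size are all assumptions):

    u64 nr_btree_reserve = 512;                 /* assumed */
    u64 nbuckets         = 1 << 20;             /* assumed: ~1M buckets */
    u64 first_bucket     = 256;                 /* assumed */
    u64 bucket_size      = 1024;                /* sectors, i.e. 512 KiB buckets */

    u64 dev_reserve = 0;
    dev_reserve += nr_btree_reserve * 2;        /*  1024 buckets for btree nodes */
    dev_reserve += nbuckets >> 6;               /* 16384 buckets (1/64) for copygc */
    dev_reserve *= bucket_size;                 /* -> 17,825,792 sectors, about 8.5 GiB */

    u64 capacity = (nbuckets - first_bucket) * bucket_size;    /* i.e. bucket_to_sector() */
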
2470 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_has_open_write_point() argument
2480 ob->dev == ca->dev_idx) in bch2_dev_has_open_write_point()
2489 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_remove() argument
2496 clear_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_remove()
2505 bch2_open_buckets_stop(c, ca, false); in bch2_dev_allocator_remove()
2522 !bch2_dev_has_open_write_point(c, ca)); in bch2_dev_allocator_remove()
2526 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_add() argument
2531 if (ca->mi.data_allowed & (1 << i)) in bch2_dev_allocator_add()
2532 set_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_add()
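
bch2_dev_allocator_add()/_remove() keep one device bitmap per data type in c->rw_devs[], so the allocator can pick candidate devices for a given data type with a plain bitmap scan. Add sets the device's bit only in the bitmaps its data_allowed mask permits; remove (line 2496) clears it in every bitmap, then stops open buckets and waits out in-flight write points (2505, 2522). The add loop, reconstructed with the bound BCH_DATA_NR assumed from context:

    for (unsigned i = 0; i < BCH_DATA_NR; i++)  /* assumption: loop bound */
        if (ca->mi.data_allowed & (1 << i))
            set_bit(ca->dev_idx, c->rw_devs[i].d);  /* eligible for this data type */
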
2537 void bch2_dev_allocator_background_exit(struct bch_dev *ca) in bch2_dev_allocator_background_exit() argument
2539 darray_exit(&ca->discard_buckets_in_flight); in bch2_dev_allocator_background_exit()
2542 void bch2_dev_allocator_background_init(struct bch_dev *ca) in bch2_dev_allocator_background_init() argument
2544 mutex_init(&ca->discard_buckets_in_flight_lock); in bch2_dev_allocator_background_init()
2545 INIT_WORK(&ca->discard_work, bch2_do_discards_work); in bch2_dev_allocator_background_init()
2546 INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work); in bch2_dev_allocator_background_init()
2547 INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work); in bch2_dev_allocator_background_init()