Lines matching full:ca

These are full-identifier search hits for ca, the struct cache pointer used throughout the Linux kernel's bcache driver (the functions below all appear to live in drivers/md/bcache/super.c). Each hit shows its source line number, the matching code, and the enclosing function as reported by the search tool; continuation lines that do not themselves contain ca are omitted.
358 struct cache *ca = bio->bi_private; in write_super_endio() local
361 bch_count_io_errors(ca, bio->bi_status, 0, in write_super_endio()
363 closure_put(&ca->set->sb_write); in write_super_endio()
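
write_super_endio() is the completion side of the superblock write: it charges any I/O error against the cache device and then drops its reference on the set's sb_write closure, which is what eventually releases whoever kicked off the write in bcache_write_super(). A minimal userspace sketch of that last-put-runs-the-completion pattern (the mini_* names are illustrative, not the kernel's closure API):

    #include <stdatomic.h>

    /* Hypothetical stand-in for the kernel's closure refcount; the
     * closure_put(&ca->set->sb_write) above is the put side of this. */
    struct mini_closure {
        atomic_int remaining;
        void (*done)(struct mini_closure *cl);
    };

    static void mini_closure_put(struct mini_closure *cl)
    {
        /* Dropping the last reference runs the completion, which is how
         * the waiter learns the superblock write has finished. */
        if (atomic_fetch_sub_explicit(&cl->remaining, 1,
                                      memory_order_acq_rel) == 1)
            cl->done(cl);
    }
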
376 struct cache *ca = c->cache; in bcache_write_super() local
377 struct bio *bio = &ca->sb_bio; in bcache_write_super()
383 ca->sb.seq++; in bcache_write_super()
385 if (ca->sb.version < version) in bcache_write_super()
386 ca->sb.version = version; in bcache_write_super()
388 bio_init(bio, ca->bdev, ca->sb_bv, 1, 0); in bcache_write_super()
390 bio->bi_private = ca; in bcache_write_super()
393 __write_super(&ca->sb, ca->sb_disk, bio); in bcache_write_super()
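
bcache_write_super() bumps ca->sb.seq on every write and only ever moves ca->sb.version forward before re-initialising the embedded sb_bio and handing off to __write_super(). The monotonic-update rule in isolation (field names mirror the listing; the rest is illustrative):

    /* Illustrative model: bump the sequence on every write, and never
     * move the on-disk format version backwards. */
    struct mini_sb {
        unsigned long long seq;
        unsigned int version;
    };

    static void prepare_super_write(struct mini_sb *sb, unsigned int version)
    {
        sb->seq++;                      /* cf. ca->sb.seq++ */
        if (sb->version < version)      /* cf. the check at line 385 */
            sb->version = version;      /* version only ratchets upward */
    }
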
503 struct cache *ca = c->cache; in __uuid_write() local
512 size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS; in __uuid_write()
518 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written); in __uuid_write()
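
__uuid_write() sizes its write as meta_bucket_pages(&ca->sb) * PAGE_SECTORS and charges a full bucket to ca->meta_sectors_written. Assuming 4 KiB pages and 512-byte sectors (so PAGE_SECTORS is 8), the pages-to-sectors conversion is just:

    #define SECTOR_SHIFT  9                        /* 512-byte sectors */
    #define PAGE_SECTORS  (4096 >> SECTOR_SHIFT)   /* 8, assuming 4 KiB pages */

    /* Hypothetical helper mirroring the size computation at line 512. */
    static unsigned int meta_write_sectors(unsigned int meta_bucket_pages)
    {
        return meta_bucket_pages * PAGE_SECTORS;   /* pages -> 512 B sectors */
    }
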
583 struct cache *ca = bio->bi_private; in prio_endio() local
585 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities"); in prio_endio()
586 bch_bbio_free(bio, ca->set); in prio_endio()
587 closure_put(&ca->prio); in prio_endio()
590 static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf) in prio_io() argument
592 struct closure *cl = &ca->prio; in prio_io()
593 struct bio *bio = bch_bbio_alloc(ca->set); in prio_io()
597 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
598 bio_set_dev(bio, ca->bdev); in prio_io()
599 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb); in prio_io()
602 bio->bi_private = ca; in prio_io()
604 bch_bio_map(bio, ca->disk_buckets); in prio_io()
606 closure_bio_submit(ca->set, bio, &ca->prio); in prio_io()
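
prio_io() shows the bucket addressing used throughout: the starting sector is bucket * ca->sb.bucket_size (bucket_size is kept in 512-byte sectors), one metadata bucket's worth of bytes is transferred, and bch_bio_map() points the bio at ca->disk_buckets. The same math with plain types (hypothetical helper, simplified fields):

    /* Illustrative: bucket index to starting sector and transfer length. */
    struct mini_io_addr {
        unsigned long long sector;   /* cf. bio->bi_iter.bi_sector */
        unsigned int bytes;          /* cf. bio->bi_iter.bi_size */
    };

    static struct mini_io_addr prio_io_addr(unsigned long long bucket,
                                            unsigned int bucket_size_sectors,
                                            unsigned int meta_bucket_bytes)
    {
        struct mini_io_addr a = {
            .sector = bucket * bucket_size_sectors,
            .bytes  = meta_bucket_bytes,
        };
        return a;
    }
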
610 int bch_prio_write(struct cache *ca, bool wait) in bch_prio_write() argument
617 fifo_used(&ca->free[RESERVE_PRIO]), in bch_prio_write()
618 fifo_used(&ca->free[RESERVE_NONE]), in bch_prio_write()
619 fifo_used(&ca->free_inc)); in bch_prio_write()
627 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) + in bch_prio_write()
628 fifo_used(&ca->free[RESERVE_NONE]); in bch_prio_write()
629 if (prio_buckets(ca) > avail) in bch_prio_write()
635 lockdep_assert_held(&ca->set->bucket_lock); in bch_prio_write()
637 ca->disk_buckets->seq++; in bch_prio_write()
639 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), in bch_prio_write()
640 &ca->meta_sectors_written); in bch_prio_write()
642 for (i = prio_buckets(ca) - 1; i >= 0; --i) { in bch_prio_write()
644 struct prio_set *p = ca->disk_buckets; in bch_prio_write()
646 struct bucket_disk *end = d + prios_per_bucket(ca); in bch_prio_write()
648 for (b = ca->buckets + i * prios_per_bucket(ca); in bch_prio_write()
649 b < ca->buckets + ca->sb.nbuckets && d < end; in bch_prio_write()
655 p->next_bucket = ca->prio_buckets[i + 1]; in bch_prio_write()
656 p->magic = pset_magic(&ca->sb); in bch_prio_write()
657 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8); in bch_prio_write()
659 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait); in bch_prio_write()
662 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
663 prio_io(ca, bucket, REQ_OP_WRITE); in bch_prio_write()
664 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
666 ca->prio_buckets[i] = bucket; in bch_prio_write()
667 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
670 mutex_unlock(&ca->set->bucket_lock); in bch_prio_write()
672 bch_journal_meta(ca->set, &cl); in bch_prio_write()
675 mutex_lock(&ca->set->bucket_lock); in bch_prio_write()
681 for (i = 0; i < prio_buckets(ca); i++) { in bch_prio_write()
682 if (ca->prio_last_buckets[i]) in bch_prio_write()
683 __bch_bucket_free(ca, in bch_prio_write()
684 &ca->buckets[ca->prio_last_buckets[i]]); in bch_prio_write()
686 ca->prio_last_buckets[i] = ca->prio_buckets[i]; in bch_prio_write()
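
bch_prio_write() builds the on-disk priority chain backwards: iterating i from prio_buckets(ca) - 1 down to 0 means that by the time page i is checksummed and written, the bucket chosen for page i + 1 is already known and can be stored in p->next_bucket. After the writes, bch_journal_meta() commits the new head; only then does the final loop free the previous generation's buckets and promote the new ones into prio_last_buckets[], giving crash-safe double buffering. A compact model of the chain construction (allocation, locking and I/O elided; types hypothetical):

    #include <stdint.h>

    /* Illustrative: pages go out highest-index first so each can record
     * the freshly chosen bucket of its successor. The tail's pointer is
     * never followed; prio_read() knows how many pages to expect. */
    struct mini_prio_page {
        uint64_t csum;         /* cf. p->csum, crc over the rest */
        uint64_t next_bucket;  /* cf. p->next_bucket */
    };

    static void write_prio_chain(struct mini_prio_page *page,
                                 const uint64_t *bucket, int n)
    {
        uint64_t next = 0;                /* unused tail pointer */
        int i;

        for (i = n - 1; i >= 0; --i) {
            page[i].next_bucket = next;
            /* ...fill entries, checksum, submit page[i] to bucket[i]... */
            next = bucket[i];
        }
    }
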
691 static int prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
693 struct prio_set *p = ca->disk_buckets; in prio_read()
694 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; in prio_read()
699 for (b = ca->buckets; in prio_read()
700 b < ca->buckets + ca->sb.nbuckets; in prio_read()
703 ca->prio_buckets[bucket_nr] = bucket; in prio_read()
704 ca->prio_last_buckets[bucket_nr] = bucket; in prio_read()
707 prio_io(ca, bucket, REQ_OP_READ); in prio_read()
710 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) { in prio_read()
715 if (p->magic != pset_magic(&ca->sb)) { in prio_read()
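
prio_read() walks that chain forward: read a bucket, verify the checksum over everything after the leading csum field (hence the meta_bucket_bytes(&ca->sb) - 8), check the magic, then follow p->next_bucket. A sketch of the validation step; mini_crc64() is a toy FNV-1a stand-in, not the kernel's bch_crc64():

    #include <stdint.h>
    #include <stddef.h>

    /* On-disk header modeled on struct prio_set: csum is the leading
     * 8 bytes and the checksum covers everything after it. */
    struct mini_pset {
        uint64_t csum;
        uint64_t magic;
        /* ... seq, next_bucket, packed bucket entries follow ... */
    };

    static uint64_t mini_crc64(const void *data, size_t len)
    {
        const unsigned char *b = data;
        uint64_t h = 0xcbf29ce484222325ull;     /* FNV-1a, for the sketch */
        while (len--)
            h = (h ^ *b++) * 0x100000001b3ull;
        return h;
    }

    static int pset_valid(const struct mini_pset *p, size_t bucket_bytes,
                          uint64_t expect_magic)
    {
        if (p->csum != mini_crc64(&p->magic, bucket_bytes - 8))
            return 0;                      /* bad csum */
        return p->magic == expect_magic;   /* bad magic otherwise */
    }
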
793 struct cache *ca = d->c->cache; in bcache_device_unlink() local
798 bd_unlink_disk_holder(ca->bdev, d->disk); in bcache_device_unlink()
805 struct cache *ca = c->cache; in bcache_device_link() local
808 bd_link_disk_holder(ca->bdev, d->disk); in bcache_device_link()
1669 struct cache *ca; in CLOSURE_CALLBACK() local
1681 ca = c->cache; in CLOSURE_CALLBACK()
1682 if (ca) { in CLOSURE_CALLBACK()
1683 ca->set = NULL; in CLOSURE_CALLBACK()
1685 kobject_put(&ca->kobj); in CLOSURE_CALLBACK()
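
This teardown callback (likely cache_set_free(), defined via the CLOSURE_CALLBACK() macro) severs the cache's back-pointer before dropping the kobject reference, so nothing run from the final release can reach into the dying set. The ordering, modeled with hypothetical minimal types:

    #include <stdlib.h>

    struct mini_set;
    struct mini_cache { struct mini_set *set; int refs; };
    struct mini_set   { struct mini_cache *cache; };

    static void mini_put(struct mini_cache *ca)
    {
        if (--ca->refs == 0)
            free(ca);                /* the release path runs here */
    }

    static void mini_set_free(struct mini_set *c)
    {
        struct mini_cache *ca = c->cache;

        if (ca) {
            ca->set = NULL;          /* sever ca -> set first... */
            mini_put(ca);            /* ...then drop the reference */
        }
    }
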
1710 struct cache *ca = c->cache; in CLOSURE_CALLBACK() local
1736 if (ca->alloc_thread) in CLOSURE_CALLBACK()
1737 kthread_stop(ca->alloc_thread); in CLOSURE_CALLBACK()
1853 struct cache *ca = container_of(sb, struct cache, sb); in bch_cache_set_alloc() local
1877 c->cache = ca; in bch_cache_set_alloc()
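
bch_cache_set_alloc() is handed a struct cache_sb * and recovers the enclosing struct cache with container_of(), i.e. pointer arithmetic from the member back to its container. A self-contained userspace rendition (simplified macro; the kernel's version adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of, as used at line 1853 to get from a
     * struct cache_sb pointer back to its enclosing struct cache. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mini_sb    { unsigned long nbuckets; };
    struct mini_cache { int id; struct mini_sb sb; };

    int main(void)
    {
        struct mini_cache ca = { .id = 7, .sb = { .nbuckets = 1024 } };
        struct mini_sb *sb = &ca.sb;   /* only the member escapes */

        struct mini_cache *back = container_of(sb, struct mini_cache, sb);
        printf("recovered id = %d\n", back->id);   /* prints 7 */
        return 0;
    }
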
1968 struct cache *ca = c->cache; in run_cache_set() local
1975 c->nbuckets = ca->sb.nbuckets; in run_cache_set()
1995 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev])) in run_cache_set()
2040 if (bch_cache_allocator_start(ca)) in run_cache_set()
2063 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, in run_cache_set()
2066 for (j = 0; j < ca->sb.keys; j++) in run_cache_set()
2067 ca->sb.d[j] = ca->sb.first_bucket + j; in run_cache_set()
2072 if (bch_cache_allocator_start(ca)) in run_cache_set()
2076 bch_prio_write(ca, true); in run_cache_set()
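
On a freshly formatted cache, run_cache_set() carves out journal buckets: roughly nbuckets / 128 of them (clamped; the clamp bounds sit on continuation lines the search omits), laid out contiguously from first_bucket into sb.d[]. A sketch with stand-in bounds (MIN_J and MAX_J are placeholders, not the kernel's constants):

    #define MIN_J 2     /* hypothetical lower clamp bound */
    #define MAX_J 256   /* hypothetical upper clamp bound */

    static int fresh_journal_layout(unsigned long long nbuckets,
                                    unsigned int first_bucket,
                                    unsigned long long *d)
    {
        int keys = nbuckets >> 7;          /* ~1/128th of all buckets */
        if (keys < MIN_J) keys = MIN_J;    /* cf. clamp_t() at line 2063 */
        if (keys > MAX_J) keys = MAX_J;
        for (int j = 0; j < keys; j++)
            d[j] = first_bucket + j;       /* journal buckets contiguous */
        return keys;
    }
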
2140 static const char *register_cache_set(struct cache *ca) in register_cache_set() argument
2147 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
2154 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
2170 sprintf(buf, "cache%i", ca->sb.nr_this_dev); in register_cache_set()
2171 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
2172 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
2175 kobject_get(&ca->kobj); in register_cache_set()
2176 ca->set = c; in register_cache_set()
2177 ca->set->cache = ca; in register_cache_set()
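
register_cache_set() is a find-or-create keyed on the 16-byte set UUID: an existing set wins the memcmp() at line 2147, otherwise bch_cache_set_alloc() makes a new one; either way the cache and set are cross-linked in sysfs and via ca->set / c->cache. The lookup half, modeled on a plain list:

    #include <string.h>

    /* Illustrative find-or-create keyed on the set UUID; the kernel
     * iterates a global list of cache sets here. */
    struct mini_set { unsigned char uuid[16]; struct mini_set *next; };

    static struct mini_set *find_set(struct mini_set *head,
                                     const unsigned char uuid[16])
    {
        for (struct mini_set *c = head; c; c = c->next)
            if (!memcmp(c->uuid, uuid, 16))
                return c;     /* cache belongs to an existing set */
        return NULL;          /* caller allocates a fresh set */
    }
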
2191 /* When ca->kobj released */
2194 struct cache *ca = container_of(kobj, struct cache, kobj); in bch_cache_release() local
2197 if (ca->set) { in bch_cache_release()
2198 BUG_ON(ca->set->cache != ca); in bch_cache_release()
2199 ca->set->cache = NULL; in bch_cache_release()
2202 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb))); in bch_cache_release()
2203 kfree(ca->prio_buckets); in bch_cache_release()
2204 vfree(ca->buckets); in bch_cache_release()
2206 free_heap(&ca->heap); in bch_cache_release()
2207 free_fifo(&ca->free_inc); in bch_cache_release()
2210 free_fifo(&ca->free[i]); in bch_cache_release()
2212 if (ca->sb_disk) in bch_cache_release()
2213 put_page(virt_to_page(ca->sb_disk)); in bch_cache_release()
2215 if (ca->bdev_file) in bch_cache_release()
2216 fput(ca->bdev_file); in bch_cache_release()
2218 kfree(ca); in bch_cache_release()
2222 static int cache_alloc(struct cache *ca) in cache_alloc() argument
2231 kobject_init(&ca->kobj, &bch_cache_ktype); in cache_alloc()
2233 bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0); in cache_alloc()
2236 * when ca->sb.njournal_buckets is not zero, journal exists, in cache_alloc()
2244 btree_buckets = ca->sb.njournal_buckets ?: 8; in cache_alloc()
2245 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; in cache_alloc()
2248 err = "ca->sb.nbuckets is too small"; in cache_alloc()
2252 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, in cache_alloc()
2254 err = "ca->free[RESERVE_BTREE] alloc failed"; in cache_alloc()
2258 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), in cache_alloc()
2260 err = "ca->free[RESERVE_PRIO] alloc failed"; in cache_alloc()
2264 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) { in cache_alloc()
2265 err = "ca->free[RESERVE_MOVINGGC] alloc failed"; in cache_alloc()
2269 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) { in cache_alloc()
2270 err = "ca->free[RESERVE_NONE] alloc failed"; in cache_alloc()
2274 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) { in cache_alloc()
2275 err = "ca->free_inc alloc failed"; in cache_alloc()
2279 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) { in cache_alloc()
2280 err = "ca->heap alloc failed"; in cache_alloc()
2284 ca->buckets = vzalloc(array_size(sizeof(struct bucket), in cache_alloc()
2285 ca->sb.nbuckets)); in cache_alloc()
2286 if (!ca->buckets) { in cache_alloc()
2287 err = "ca->buckets alloc failed"; in cache_alloc()
2291 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t), in cache_alloc()
2292 prio_buckets(ca), 2), in cache_alloc()
2294 if (!ca->prio_buckets) { in cache_alloc()
2295 err = "ca->prio_buckets alloc failed"; in cache_alloc()
2299 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb); in cache_alloc()
2300 if (!ca->disk_buckets) { in cache_alloc()
2301 err = "ca->disk_buckets alloc failed"; in cache_alloc()
2305 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca); in cache_alloc()
2307 for_each_bucket(b, ca) in cache_alloc()
2312 kfree(ca->prio_buckets); in cache_alloc()
2314 vfree(ca->buckets); in cache_alloc()
2316 free_heap(&ca->heap); in cache_alloc()
2318 free_fifo(&ca->free_inc); in cache_alloc()
2320 free_fifo(&ca->free[RESERVE_NONE]); in cache_alloc()
2322 free_fifo(&ca->free[RESERVE_MOVINGGC]); in cache_alloc()
2324 free_fifo(&ca->free[RESERVE_PRIO]); in cache_alloc()
2326 free_fifo(&ca->free[RESERVE_BTREE]); in cache_alloc()
2331 pr_notice("error %pg: %s\n", ca->bdev, err); in cache_alloc()
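
cache_alloc() uses the kernel's standard goto-unwind error handling: each allocation that fails jumps to a label that frees exactly what was already set up, in reverse order, with the labels falling through one another. The shape of that pattern in miniature (names and sizes are placeholders):

    #include <stdlib.h>

    static int mini_cache_alloc(size_t nbuckets)
    {
        void *free_btree = NULL, *free_prio = NULL, *buckets = NULL;

        if (!(free_btree = malloc(64)))
            goto err_btree;
        if (!(free_prio = malloc(64)))
            goto err_prio;
        if (!(buckets = calloc(nbuckets, 16)))   /* cf. vzalloc() */
            goto err_buckets;
        return 0;   /* on success the allocations are kept, as here */

    err_buckets:            /* labels unwind in reverse, falling through */
        free(free_prio);
    err_prio:
        free(free_btree);
    err_btree:
        return -1;          /* -ENOMEM in the kernel */
    }
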
2337 struct cache *ca) in register_cache() argument
2342 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); in register_cache()
2343 ca->bdev_file = bdev_file; in register_cache()
2344 ca->bdev = file_bdev(bdev_file); in register_cache()
2345 ca->sb_disk = sb_disk; in register_cache()
2348 ca->discard = CACHE_DISCARD(&ca->sb); in register_cache()
2350 ret = cache_alloc(ca); in register_cache()
2360 * If we failed here, it means ca->kobj is not initialized yet, in register_cache()
2369 if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) { in register_cache()
2377 err = register_cache_set(ca); in register_cache()
2385 pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file)); in register_cache()
2388 kobject_put(&ca->kobj); in register_cache()
2424 struct cache *ca = c->cache; in bch_is_open_cache() local
2426 if (ca->bdev->bd_dev == dev) in bch_is_open_cache()