Lines matching refs:bs (struct bio_set *bs) in block/bio.c

112 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)  in bs_bio_slab_size()  argument
114 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
117 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) in bio_find_or_create_slab() argument
119 unsigned int size = bs_bio_slab_size(bs); in bio_find_or_create_slab()
135 static void bio_put_slab(struct bio_set *bs) in bio_put_slab() argument
138 unsigned int slab_size = bs_bio_slab_size(bs); in bio_put_slab()
146 WARN_ON_ONCE(bslab->slab != bs->bio_slab); in bio_put_slab()
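
The three functions above manage the slab cache backing a bio_set's bio_pool: bs_bio_slab_size() computes the object size as front_pad + sizeof(struct bio) + back_pad, bio_find_or_create_slab() reference-counts one kmem_cache per distinct size, and bio_put_slab() drops that reference at teardown (warning if the set's slab no longer matches). A hedged sketch of the sizing arithmetic; sketch_slab_size() is illustrative, and BIO_INLINE_VECS is file-local to block/bio.c, so it is repeated here:

    #include <linux/bio.h>

    #define BIO_INLINE_VECS 4   /* private to block/bio.c; repeated for the sketch */

    /* Mirrors bs_bio_slab_size() plus the back_pad choice made in
     * bioset_init(): a slab element is laid out as
     * [front_pad][struct bio][inline bvecs], where the inline bvecs
     * exist only when BIOSET_NEED_BVECS was requested. */
    static unsigned int sketch_slab_size(unsigned int front_pad, bool need_bvecs)
    {
            unsigned int back_pad = need_bvecs ?
                    BIO_INLINE_VECS * sizeof(struct bio_vec) : 0;

            return front_pad + sizeof(struct bio) + back_pad;
    }
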
230 struct bio_set *bs = bio->bi_pool; in bio_free() local
233 WARN_ON_ONCE(!bs); in bio_free()
236 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
237 mempool_free(p - bs->front_pad, &bs->bio_pool); in bio_free()
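
bio_free() must hand the mempool back the element it originally allocated, which starts front_pad bytes before the bio itself; hence the p - bs->front_pad at line 237. The same layout is what lets stacking drivers keep per-bio context in front_pad. A hedged sketch of the driver-side accessor (my_ctx and bio_to_my_ctx are hypothetical; device-mapper's per-bio-data accessor follows the same pointer arithmetic):

    #include <linux/bio.h>

    struct my_ctx {                     /* hypothetical per-bio driver context */
            sector_t orig_sector;
    };

    /* The element layout is [front_pad][struct bio][inline bvecs], so a
     * context placed in front_pad sits immediately before the bio. */
    static inline struct my_ctx *bio_to_my_ctx(struct bio *bio)
    {
            return (struct my_ctx *)((char *)bio - sizeof(struct my_ctx));
    }
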
375 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); in bio_alloc_rescue() local
379 spin_lock(&bs->rescue_lock); in bio_alloc_rescue()
380 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
381 spin_unlock(&bs->rescue_lock); in bio_alloc_rescue()
390 static void punt_bios_to_rescuer(struct bio_set *bs) in punt_bios_to_rescuer() argument
395 if (WARN_ON_ONCE(!bs->rescue_workqueue)) in punt_bios_to_rescuer()
412 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
417 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
420 spin_lock(&bs->rescue_lock); in punt_bios_to_rescuer()
421 bio_list_merge(&bs->rescue_list, &punt); in punt_bios_to_rescuer()
422 spin_unlock(&bs->rescue_lock); in punt_bios_to_rescuer()
424 queue_work(bs->rescue_workqueue, &bs->rescue_work); in punt_bios_to_rescuer()
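
punt_bios_to_rescuer() breaks a potential allocation deadlock: inside submit_bio_noacct(), recursively submitted bios queue up on current->bio_list, and a mempool_alloc() there can wait forever on bios that will only complete once that list is processed. The function moves the bios owned by this bio_set onto bs->rescue_list and kicks bs->rescue_work, so bio_alloc_rescue() resubmits them from workqueue context. A simplified sketch of the splice; the real code processes both entries of current->bio_list, which is why the bio_list_add line appears twice above (lines 412 and 417):

    /* Simplified punt: steal this set's bios off the current task's
     * deferred list, leave everyone else's in place. Assumes the
     * block/bio.c context (current->bio_list is non-NULL here). */
    static void punt_sketch(struct bio_set *bs)
    {
            struct bio_list punt, nopunt;
            struct bio *bio;

            bio_list_init(&punt);
            bio_list_init(&nopunt);

            while ((bio = bio_list_pop(&current->bio_list[0])))
                    bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
            current->bio_list[0] = nopunt;

            spin_lock(&bs->rescue_lock);
            bio_list_merge(&bs->rescue_list, &punt);
            spin_unlock(&bs->rescue_lock);

            queue_work(bs->rescue_workqueue, &bs->rescue_work);
    }
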
445 struct bio_set *bs) in bio_alloc_percpu_cache() argument
450 cache = per_cpu_ptr(bs->cache, get_cpu()); in bio_alloc_percpu_cache()
465 bio->bi_pool = bs; in bio_alloc_percpu_cache()
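
bio_alloc_percpu_cache() is the fast path taken when the set was created with BIOSET_PERCPU_CACHE: freed bios park on a per-cpu list (bs->cache) and are handed straight back on the same CPU without touching the mempool, provided the request fits in the inline vectors (the nr_vecs <= BIO_INLINE_VECS test at line 516). A hedged sketch of opting in; my_bio_set and the pool size are illustrative:

    static struct bio_set my_bio_set;   /* hypothetical */

    static int my_bioset_setup(void)
    {
            /* 64 is an arbitrary minimum pool size for the sketch. */
            return bioset_init(&my_bio_set, 64, 0,
                               BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
    }
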
505 struct bio_set *bs) in bio_alloc_bioset() argument
512 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) in bio_alloc_bioset()
516 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { in bio_alloc_bioset()
518 gfp_mask, bs); in bio_alloc_bioset()
551 bs->rescue_workqueue) in bio_alloc_bioset()
554 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
556 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
558 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
562 if (!mempool_is_saturated(&bs->bio_pool)) in bio_alloc_bioset()
565 bio = p + bs->front_pad; in bio_alloc_bioset()
569 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
571 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
573 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
585 bio->bi_pool = bs; in bio_alloc_bioset()
589 mempool_free(p, &bs->bio_pool); in bio_alloc_bioset()
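
bio_alloc_bioset() stitches the pieces together: it tries the per-cpu cache first (line 516), falls back to mempool_alloc(), and when a direct-reclaim allocation stalls while bios sit on current->bio_list it punts them to the rescuer and retries (lines 554-558); the bio lives front_pad bytes into the element (line 565), and external bvecs are allocated only when nr_vecs exceeds the inline ones (line 569). A hedged usage sketch; my_submit_write, bdev handling, and my_bio_set are assumptions, not bio.c code:

    static int my_submit_write(struct block_device *bdev, sector_t sector)
    {
            struct bio *bio;

            bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO,
                                   &my_bio_set);
            if (!bio)       /* defensive: GFP_NOIO mempool allocs sleep
                               rather than fail */
                    return -ENOMEM;

            bio->bi_iter.bi_sector = sector;
            /* ... add pages with bio_add_page(), set bi_end_io ... */
            submit_bio(bio);
            return 0;
    }
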
742 struct bio_set *bs; in bio_cpu_dead() local
744 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); in bio_cpu_dead()
745 if (bs->cache) { in bio_cpu_dead()
746 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); in bio_cpu_dead()
753 static void bio_alloc_cache_destroy(struct bio_set *bs) in bio_alloc_cache_destroy() argument
757 if (!bs->cache) in bio_alloc_cache_destroy()
760 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bio_alloc_cache_destroy()
764 cache = per_cpu_ptr(bs->cache, cpu); in bio_alloc_cache_destroy()
767 free_percpu(bs->cache); in bio_alloc_cache_destroy()
768 bs->cache = NULL; in bio_alloc_cache_destroy()
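
bio_cpu_dead() and bio_alloc_cache_destroy() keep the per-cpu cache honest across CPU hotplug: a dying CPU's cached bios are freed back, and teardown removes the hotplug instance before freeing bs->cache. The registration side lives in init_bio(); shown as a hedged sketch since only the callback name appears in this listing, and the exact state string is from memory:

    /* One-time registration in block/bio.c's init_bio(); each bio_set
     * with a cache later enrolls via
     * cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead). */
    cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead",
                            NULL, bio_cpu_dead);
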
860 gfp_t gfp, struct bio_set *bs) in bio_alloc_clone() argument
864 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
1727 gfp_t gfp, struct bio_set *bs) in bio_split() argument
1738 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
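
bio_alloc_clone() draws a fresh bio from the caller-supplied set and copies the source's opf and geometry into it; bio_split() builds on that to carve the first sectors off a bio. Stacking drivers pass their own bio_set here so a split can never starve the pool the original bio was allocated from. A hedged sketch of the usual split-and-chain idiom (the failure-return convention of bio_split() has varied across kernel versions, so the check is illustrative):

    /* Carve `sectors` off the front of `bio`, make the parent complete
     * only after the split part does, and requeue the remainder. */
    struct bio *split;

    split = bio_split(bio, sectors, GFP_NOIO, &my_bio_set);
    if (split) {
            bio_chain(split, bio);
            submit_bio_noacct(bio);     /* remainder goes back for later */
            bio = split;                /* continue with the front part */
    }
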
1800 void bioset_exit(struct bio_set *bs) in bioset_exit() argument
1802 bio_alloc_cache_destroy(bs); in bioset_exit()
1803 if (bs->rescue_workqueue) in bioset_exit()
1804 destroy_workqueue(bs->rescue_workqueue); in bioset_exit()
1805 bs->rescue_workqueue = NULL; in bioset_exit()
1807 mempool_exit(&bs->bio_pool); in bioset_exit()
1808 mempool_exit(&bs->bvec_pool); in bioset_exit()
1810 bioset_integrity_free(bs); in bioset_exit()
1811 if (bs->bio_slab) in bioset_exit()
1812 bio_put_slab(bs); in bioset_exit()
1813 bs->bio_slab = NULL; in bioset_exit()
1838 int bioset_init(struct bio_set *bs, in bioset_init() argument
1843 bs->front_pad = front_pad; in bioset_init()
1845 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); in bioset_init()
1847 bs->back_pad = 0; in bioset_init()
1849 spin_lock_init(&bs->rescue_lock); in bioset_init()
1850 bio_list_init(&bs->rescue_list); in bioset_init()
1851 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); in bioset_init()
1853 bs->bio_slab = bio_find_or_create_slab(bs); in bioset_init()
1854 if (!bs->bio_slab) in bioset_init()
1857 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) in bioset_init()
1861 biovec_init_pool(&bs->bvec_pool, pool_size)) in bioset_init()
1865 bs->rescue_workqueue = alloc_workqueue("bioset", in bioset_init()
1867 if (!bs->rescue_workqueue) in bioset_init()
1871 bs->cache = alloc_percpu(struct bio_alloc_cache); in bioset_init()
1872 if (!bs->cache) in bioset_init()
1874 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bioset_init()
1879 bioset_exit(bs); in bioset_init()
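
bioset_init() builds the set in dependency order: padding, rescuer plumbing, slab, bio mempool, optional bvec mempool, optional rescuer workqueue, optional per-cpu cache; any failure falls through to line 1879, which simply reuses bioset_exit(). That is why bioset_exit() (lines 1800-1813) NULLs each pointer as it tears it down and copes with a partially constructed set. A hedged end-to-end lifecycle sketch with hypothetical names:

    #include <linux/module.h>
    #include <linux/bio.h>

    static struct bio_set my_bio_set;   /* hypothetical driver-owned set */

    static int __init my_driver_init(void)
    {
            /* BIO_POOL_SIZE guaranteed bios, no front_pad, a bvec
             * mempool, and a rescuer workqueue for stacked submission. */
            return bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
                               BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
    }

    static void __exit my_driver_exit(void)
    {
            bioset_exit(&my_bio_set);   /* frees pools, slab ref, workqueue */
    }

    module_init(my_driver_init);
    module_exit(my_driver_exit);
    MODULE_LICENSE("GPL");
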