drivers/md/dm-bufio.c: struct dm_buffer_cache helpers (lines matching full:bc)
/*
 * Per-tree locking: a bh-disabling rwlock when the client is marked
 * no-sleep, an rw_semaphore otherwise. The static branch keeps the
 * no-sleep test cheap for the common, sleeping case.
 */
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
	if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
		write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
	else
		up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
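cache_index() itself does not appear among the matched lines above; the sketch below only illustrates the shape such a lock-stripe selector has, and is not the kernel's actual implementation.

/*
 * Illustrative only: maps a block to one of num_locks lock stripes.
 * Assumes num_locks is a power of two; the real dm-bufio helper may
 * hash differently.
 */
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	return (unsigned int)(block ^ (block >> 3)) & (num_locks - 1);
}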
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
	unsigned int i;

	bc->num_locks = num_locks;
	bc->no_sleep = no_sleep;

	for (i = 0; i < bc->num_locks; i++) {
		if (no_sleep)
			rwlock_init(&bc->trees[i].u.spinlock);
		else
			init_rwsem(&bc->trees[i].u.lock);
		bc->trees[i].root = RB_ROOT;
	}

	lru_init(&bc->lru[LIST_CLEAN]);
	lru_init(&bc->lru[LIST_DIRTY]);
}
static void cache_destroy(struct dm_buffer_cache *bc)
{
	unsigned int i;

	for (i = 0; i < bc->num_locks; i++)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

	lru_destroy(&bc->lru[LIST_CLEAN]);
	lru_destroy(&bc->lru[LIST_DIRTY]);
}
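Set-up and tear-down bracket the structure's lifetime; a hedged sketch, assuming struct dm_buffer_cache ends in a flexible trees[] array sized by num_locks (the example_* helpers are hypothetical; kvmalloc()/struct_size() are standard kernel APIs):

/*
 * Hypothetical setup/teardown. The struct layout (trailing trees[]
 * flexible array) is an assumption for illustration.
 */
static struct dm_buffer_cache *example_cache_alloc(unsigned int num_locks)
{
	struct dm_buffer_cache *bc;

	bc = kvmalloc(struct_size(bc, trees, num_locks), GFP_KERNEL);
	if (!bc)
		return NULL;
	cache_init(bc, num_locks, false /* clients may sleep */);
	return bc;
}

static void example_cache_free(struct dm_buffer_cache *bc)
{
	cache_destroy(bc);	/* warns if any tree still holds buffers */
	kvfree(bc);
}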
/* The counters are read without taking any lock, so callers get approximations. */
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
	return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
	return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}
/* Looks a buffer up in its tree and pins it with a hold count. */
static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b;

	cache_read_lock(bc, block);
	b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
	if (b) {
		lru_reference(&b->lru);
		__cache_inc_buffer(b);
	}
	cache_read_unlock(bc, block);

	return b;
}
/* Drops a hold; returns true if this was the last holder. */
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_read_lock(bc, b->block);
	BUG_ON(!atomic_read(&b->hold_count));
	r = atomic_dec_and_test(&b->hold_count);
	cache_read_unlock(bc, b->block);

	return r;
}
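Together these give a simple pin/unpin pattern; a minimal usage sketch (example_use_buffer() is hypothetical):

/* Hypothetical caller: look up a block, use it, then drop the hold. */
static void example_use_buffer(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);	/* takes a hold */

	if (!b)
		return;

	/* ... b is pinned here; safe to touch outside the tree lock ... */

	if (cache_put(bc, b)) {
		/* last hold dropped: the owner may now evict or free b */
	}
}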
static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
				       b_predicate pred, void *context,
				       struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
	if (!le)
		return NULL;

	b = le_to_buffer(le);
	/* __evict_pred will have locked the appropriate tree. */
	rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

	return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
				     b_predicate pred, void *context)
{
	struct lock_history lh;
	struct dm_buffer *b;

	lh_init(&lh, bc, true);
	b = __cache_evict(bc, list_mode, pred, context, &lh);
	lh_exit(&lh);

	return b;
}
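Eviction policy lives entirely in the caller-supplied predicate. A sketch of one, assuming dm-bufio's enum evict_result values (ER_EVICT / ER_DONT_EVICT) for the b_predicate contract (evict_if_clean() is hypothetical):

/* Hypothetical predicate: only clean buffers may be reclaimed. */
static enum evict_result evict_if_clean(struct dm_buffer *b, void *context)
{
	return b->list_mode == LIST_CLEAN ? ER_EVICT : ER_DONT_EVICT;
}

A shrinker-style caller would then loop on cache_evict(bc, LIST_CLEAN, evict_if_clean, NULL) until it returns NULL.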
/* Re-files a buffer from one LRU (clean/dirty) onto another. */
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
	cache_write_lock(bc, b->block);
	lru_remove(&bc->lru[b->list_mode], &b->lru);
	b->list_mode = list_mode;
	lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);
}
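A write path would re-file a freshly dirtied buffer with, for example:

	cache_mark(bc, b, LIST_DIRTY);	/* moves b onto bc->lru[LIST_DIRTY] */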
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			      b_predicate pred, void *context, struct lock_history *lh)
{
	struct lru_entry *le;
	struct dm_buffer *b;
	struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};

	while (true) {
		le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
		if (!le)
			break;

		b = le_to_buffer(le);
		b->list_mode = new_mode;
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	}
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
			    b_predicate pred, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, true);
	__cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
	lh_exit(&lh);
}
/* Walks an LRU ring from its cursor, calling fn() on each buffer. */
static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			    iter_fn fn, void *context, struct lock_history *lh)
{
	struct lru *lru = &bc->lru[list_mode];
	struct lru_entry *le, *first;

	if (!lru->cursor)
		return;

	first = le = to_le(lru->cursor);
	do {
		struct dm_buffer *b = le_to_buffer(le);

		lh_next(lh, b->block);

		switch (fn(b, context)) {
		case IT_NEXT:
			break;
		case IT_COMPLETE:
			return;
		}
		cond_resched();

		le = to_le(le->list.next);
	} while (le != first);
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
			  iter_fn fn, void *context)
{
	struct lock_history lh;

	lh_init(&lh, bc, false);
	__cache_iterate(bc, list_mode, fn, context, &lh);
	lh_exit(&lh);
}
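The callback decides whether the walk continues; a sketch assuming the iter_fn contract of returning IT_NEXT to keep going and IT_COMPLETE to stop early (count_one() and example_count_dirty() are hypothetical):

/* Hypothetical callback: count every buffer on the dirty list. */
static enum it_action count_one(struct dm_buffer *b, void *context)
{
	(*(unsigned long *)context)++;
	return IT_NEXT;
}

static unsigned long example_count_dirty(struct dm_buffer_cache *bc)
{
	unsigned long n = 0;

	cache_iterate(bc, LIST_DIRTY, count_one, &n);
	return n;
}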
/* Inserts a buffer into its tree and LRU; fails if the block is taken. */
static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
		return false;

	cache_write_lock(bc, b->block);
	BUG_ON(atomic_read(&b->hold_count) != 1);
	r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
	if (r)
		lru_insert(&bc->lru[b->list_mode], &b->lru);
	cache_write_unlock(bc, b->block);

	return r;
}
/* Removes a buffer, but only if the caller holds the sole reference. */
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
	bool r;

	cache_write_lock(bc, b->block);

	if (atomic_read(&b->hold_count) != 1) {
		r = false;
	} else {
		r = true;
		rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
		lru_remove(&bc->lru[b->list_mode], &b->lru);
	}

	cache_write_unlock(bc, b->block);

	return r;
}
static void __remove_range(struct dm_buffer_cache *bc,
			   struct rb_root *root,
			   sector_t begin, sector_t end,
			   b_predicate pred, b_release release)
{
	struct dm_buffer *b;

	while (true) {
		b = __find_next(root, begin);
		if (!b || (b->block >= end))
			break;

		begin = b->block + 1;

		if (!atomic_read(&b->hold_count) && pred(b, NULL) == ER_EVICT) {
			rb_erase(&b->node, root);
			lru_remove(&bc->lru[b->list_mode], &b->lru);
			release(b);
		}
	}
}
/*
 * Takes every tree's write semaphore in turn, so it must be able to
 * sleep; hence the BUG_ON for no-sleep clients.
 */
static void cache_remove_range(struct dm_buffer_cache *bc,
			       sector_t begin, sector_t end,
			       b_predicate pred, b_release release)
{
	unsigned int i;

	BUG_ON(bc->no_sleep);
	for (i = 0; i < bc->num_locks; i++) {
		down_write(&bc->trees[i].u.lock);
		__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
		up_write(&bc->trees[i].u.lock);
	}
}
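A caller invalidating a block range pairs a predicate with a release hook; a hedged sketch (always_evict(), free_buffer(), and example_forget_range() are hypothetical):

/* Hypothetical policy: drop every unheld buffer in the range. */
static enum evict_result always_evict(struct dm_buffer *b, void *context)
{
	return ER_EVICT;
}

static void example_forget_range(struct dm_buffer_cache *bc,
				 sector_t begin, sector_t end)
{
	/* free_buffer() stands in for whatever reclaims the dm_buffer. */
	cache_remove_range(bc, begin, end, always_evict, free_buffer);
}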