Lines matching full:ca — each hit below shows the source line number, the matched line, and the enclosing function (with an "argument"/"local" tag where ca is declared). ca is the struct cache pointer used throughout the bcache allocator, drivers/md/bcache/alloc.c.

76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()  argument
80 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); in bch_inc_gen()
81 WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); in bch_inc_gen()
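The bch_inc_gen() hits above bump a bucket's 8-bit generation and record the largest "GC generation" (how far any bucket's gen has drifted since the last garbage-collection pass) in need_gc, so GC can be forced before the counter wraps. A rough userspace sketch of that bookkeeping, with simplified stand-in types and an assumed value for BUCKET_GC_GEN_MAX (the real definitions live in the bcache headers):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the bcache structures (illustration only). */
struct bucket { uint8_t gen; uint8_t last_gc; };
struct cache_set { uint8_t need_gc; };
struct cache { struct cache_set *set; };

#define BUCKET_GC_GEN_MAX 96	/* bound assumed here for illustration */

/* How far this bucket's generation has moved since the last GC pass. */
static uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

static uint8_t inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	/* Track the worst drift so GC can run before the 8-bit gen wraps. */
	if (bucket_gc_gen(b) > ca->set->need_gc)
		ca->set->need_gc = bucket_gc_gen(b);
	if (ca->set->need_gc > BUCKET_GC_GEN_MAX)
		fprintf(stderr, "warning: gc generation getting too large\n");

	return ret;
}

int main(void)
{
	struct cache_set cs = { 0 };
	struct cache ca = { .set = &cs };
	struct bucket b = { .gen = 5, .last_gc = 3 };

	inc_gen(&ca, &b);
	printf("gen=%u need_gc=%u\n", b.gen, cs.need_gc);
	return 0;
}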
88 struct cache *ca; in bch_rescale_priorities() local
106 ca = c->cache; in bch_rescale_priorities()
107 for_each_bucket(b, ca) in bch_rescale_priorities()
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket() argument
132 return (ca->set->gc_mark_valid || b->reclaimable_in_gc) && in bch_can_invalidate_bucket()
137 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket() argument
139 lockdep_assert_held(&ca->set->bucket_lock); in __bch_invalidate_one_bucket()
143 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
145 bch_inc_gen(ca, b); in __bch_invalidate_one_bucket()
151 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket() argument
153 __bch_invalidate_one_bucket(ca, b); in bch_invalidate_one_bucket()
155 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
167 static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b) in new_bucket_prio() argument
169 unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; in new_bucket_prio()
171 return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); in new_bucket_prio()
178 struct cache *ca = args; in new_bucket_max_cmp() local
180 return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs); in new_bucket_max_cmp()
187 struct cache *ca = args; in new_bucket_min_cmp() local
189 return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs); in new_bucket_min_cmp()
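new_bucket_prio() above scores a bucket by combining how stale it is (b->prio relative to the set-wide minimum) with how much data it still holds (GC_SECTORS_USED), and the two comparators wrap that score for the kernel min_heap API: the "max" order is used while filling the heap, the "min" order while draining it. A rough standalone sketch of the heuristic and comparators, with plain stand-in fields and an INITIAL_PRIO value assumed to match the bcache default:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INITIAL_PRIO 32768U	/* assumed to match the bcache default */

/* Stand-ins: prio decays toward min_prio as the bucket goes unused,
 * sectors_used is what GC_SECTORS_USED() would report. */
struct bucket { uint16_t prio; uint16_t sectors_used; };
struct cache_set { uint16_t min_prio; };
struct cache { struct cache_set *set; };

/* Lower score == cheaper to reclaim: old and lightly used buckets win. */
static unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
{
	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;

	return (b->prio - ca->set->min_prio + min_prio) * b->sectors_used;
}

/* Comparators in the style of the min_heap callbacks: "max" keeps the
 * worst kept candidate on top while filling, "min" pops the best first. */
static bool bucket_max_cmp(const void *l, const void *r, void *args)
{
	struct bucket * const *lhs = l, * const *rhs = r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
}

static bool bucket_min_cmp(const void *l, const void *r, void *args)
{
	struct bucket * const *lhs = l, * const *rhs = r;
	struct cache *ca = args;

	return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}

int main(void)
{
	struct cache_set cs = { .min_prio = 100 };
	struct cache ca = { .set = &cs };
	struct bucket old_empty = { .prio = 120,   .sectors_used = 1 };
	struct bucket hot_full  = { .prio = 30000, .sectors_used = 512 };
	struct bucket *a = &old_empty, *b = &hot_full;

	printf("old/empty=%u hot/full=%u, reclaim old first: %d\n",
	       new_bucket_prio(&ca, a), new_bucket_prio(&ca, b),
	       bucket_min_cmp(&a, &b, &ca));
	return 0;
}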
199 static void invalidate_buckets_lru(struct cache *ca) in invalidate_buckets_lru() argument
211 ca->heap.nr = 0; in invalidate_buckets_lru()
213 for_each_bucket(b, ca) { in invalidate_buckets_lru()
214 if (!bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_lru()
217 if (!min_heap_full(&ca->heap)) in invalidate_buckets_lru()
218 min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca); in invalidate_buckets_lru()
219 else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) { in invalidate_buckets_lru()
220 ca->heap.data[0] = b; in invalidate_buckets_lru()
221 min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca); in invalidate_buckets_lru()
225 min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca); in invalidate_buckets_lru()
227 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_lru()
228 if (!ca->heap.nr) { in invalidate_buckets_lru()
233 ca->invalidate_needs_gc = 1; in invalidate_buckets_lru()
234 wake_up_gc(ca->set); in invalidate_buckets_lru()
237 b = min_heap_peek(&ca->heap)[0]; in invalidate_buckets_lru()
238 min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca); in invalidate_buckets_lru()
240 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_lru()
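invalidate_buckets_lru() above is a bounded "keep the k cheapest candidates" pass: while walking every bucket it maintains a fixed-size heap in max order (so the worst kept candidate sits at the root and can be replaced cheaply), then re-heapifies to min order and pops the best candidates until free_inc is full, waking GC if the heap runs dry. A rough userspace sketch of that selection pattern, using an array of scores instead of buckets and hand-rolled sift/heapify helpers in place of the kernel min_heap API:

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define HEAP_CAP 4	/* stands in for the preallocated ca->heap size */

struct heap {
	size_t nr;
	unsigned int data[HEAP_CAP];
};

static bool max_cmp(unsigned int a, unsigned int b) { return a > b; }
static bool min_cmp(unsigned int a, unsigned int b) { return a < b; }

/* Restore the heap property below node i for the given ordering. */
static void sift_down(struct heap *h, size_t i, bool (*cmp)(unsigned int, unsigned int))
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, top = i;

		if (l < h->nr && cmp(h->data[l], h->data[top]))
			top = l;
		if (r < h->nr && cmp(h->data[r], h->data[top]))
			top = r;
		if (top == i)
			return;
		unsigned int t = h->data[i];
		h->data[i] = h->data[top];
		h->data[top] = t;
		i = top;
	}
}

/* Floyd build-heap: heapify the whole array for the given ordering. */
static void heapify_all(struct heap *h, bool (*cmp)(unsigned int, unsigned int))
{
	for (size_t i = h->nr / 2; i-- > 0; )
		sift_down(h, i, cmp);
}

int main(void)
{
	/* Scores as new_bucket_prio() would compute them; lower is cheaper. */
	unsigned int score[] = { 90, 12, 55, 70, 3, 41, 88, 27, 66, 5 };
	struct heap h = { .nr = 0 };

	/* Pass 1: keep the HEAP_CAP cheapest scores, with the worst of the
	 * kept ones at the root so it can be replaced in O(log n). */
	for (size_t i = 0; i < sizeof(score) / sizeof(score[0]); i++) {
		if (h.nr < HEAP_CAP) {
			h.data[h.nr++] = score[i];
			heapify_all(&h, max_cmp);
		} else if (score[i] < h.data[0]) {
			h.data[0] = score[i];
			sift_down(&h, 0, max_cmp);
		}
	}

	/* Pass 2: flip to min order and drain best-first, the way the
	 * !fifo_full(&ca->free_inc) loop pops candidates to invalidate. */
	heapify_all(&h, min_cmp);
	while (h.nr) {
		printf("invalidate candidate with score %u\n", h.data[0]);
		h.data[0] = h.data[--h.nr];
		sift_down(&h, 0, min_cmp);
	}
	return 0;
}

The two orderings are what make the single fixed-size heap work: max order makes "is this candidate better than the worst one we kept?" an O(1) peek during the scan, and min order makes "hand out the best remaining candidate" an O(log n) pop while draining.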
244 static void invalidate_buckets_fifo(struct cache *ca) in invalidate_buckets_fifo() argument
249 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_fifo()
250 if (ca->fifo_last_bucket < ca->sb.first_bucket || in invalidate_buckets_fifo()
251 ca->fifo_last_bucket >= ca->sb.nbuckets) in invalidate_buckets_fifo()
252 ca->fifo_last_bucket = ca->sb.first_bucket; in invalidate_buckets_fifo()
254 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
256 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_fifo()
257 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_fifo()
259 if (++checked >= ca->sb.nbuckets) { in invalidate_buckets_fifo()
260 ca->invalidate_needs_gc = 1; in invalidate_buckets_fifo()
261 wake_up_gc(ca->set); in invalidate_buckets_fifo()
267 static void invalidate_buckets_random(struct cache *ca) in invalidate_buckets_random() argument
272 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_random()
277 n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); in invalidate_buckets_random()
278 n += ca->sb.first_bucket; in invalidate_buckets_random()
280 b = ca->buckets + n; in invalidate_buckets_random()
282 if (bch_can_invalidate_bucket(ca, b)) in invalidate_buckets_random()
283 bch_invalidate_one_bucket(ca, b); in invalidate_buckets_random()
285 if (++checked >= ca->sb.nbuckets / 2) { in invalidate_buckets_random()
286 ca->invalidate_needs_gc = 1; in invalidate_buckets_random()
287 wake_up_gc(ca->set); in invalidate_buckets_random()
293 static void invalidate_buckets(struct cache *ca) in invalidate_buckets() argument
295 BUG_ON(ca->invalidate_needs_gc); in invalidate_buckets()
297 switch (CACHE_REPLACEMENT(&ca->sb)) { in invalidate_buckets()
299 invalidate_buckets_lru(ca); in invalidate_buckets()
302 invalidate_buckets_fifo(ca); in invalidate_buckets()
305 invalidate_buckets_random(ca); in invalidate_buckets()
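The FIFO and random variants above both just scan for invalidatable buckets until free_inc fills up (or they give up and wake GC), and invalidate_buckets() dispatches between the three policies on the CACHE_REPLACEMENT field of the superblock. A compressed sketch of that shape, with a made-up can_invalidate() predicate and stand-in policy names rather than the real on-disk enum values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum replacement { REPL_LRU, REPL_FIFO, REPL_RANDOM };	/* stand-ins */

struct cache {
	uint64_t first_bucket, nbuckets;	/* usable bucket range */
	uint64_t fifo_last_bucket;		/* FIFO scan cursor */
	unsigned int want, got;			/* how many buckets to reclaim */
};

static bool can_invalidate(uint64_t b)
{
	return b % 3 != 0;	/* pretend every third bucket is still pinned */
}

static void take(struct cache *ca, uint64_t b)
{
	printf("invalidating bucket %llu\n", (unsigned long long)b);
	ca->got++;
}

/* Sweep the device in order, wrapping the cursor back to first_bucket. */
static void invalidate_fifo(struct cache *ca)
{
	uint64_t checked = 0;

	while (ca->got < ca->want && checked++ < ca->nbuckets) {
		if (ca->fifo_last_bucket < ca->first_bucket ||
		    ca->fifo_last_bucket >= ca->nbuckets)
			ca->fifo_last_bucket = ca->first_bucket;

		uint64_t b = ca->fifo_last_bucket++;
		if (can_invalidate(b))
			take(ca, b);
	}
}

/* Probe buckets at random within [first_bucket, nbuckets). */
static void invalidate_random(struct cache *ca)
{
	uint64_t checked = 0;

	while (ca->got < ca->want && checked++ < ca->nbuckets / 2) {
		uint64_t b = ca->first_bucket +
			(uint64_t)rand() % (ca->nbuckets - ca->first_bucket);
		if (can_invalidate(b))
			take(ca, b);
	}
}

static void invalidate_buckets(struct cache *ca, enum replacement policy)
{
	switch (policy) {
	case REPL_FIFO:   invalidate_fifo(ca);   break;
	case REPL_RANDOM: invalidate_random(ca); break;
	case REPL_LRU:    /* heap-based selection, see the sketch above */ break;
	}
}

int main(void)
{
	struct cache ca = { .first_bucket = 1, .nbuckets = 32, .want = 4 };

	invalidate_buckets(&ca, REPL_FIFO);
	return 0;
}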
310 #define allocator_wait(ca, cond) \ argument
317 mutex_unlock(&(ca)->set->bucket_lock); \
319 test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \
325 mutex_lock(&(ca)->set->bucket_lock); \
330 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
335 if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) in bch_allocator_push()
339 if (fifo_push(&ca->free[i], bucket)) in bch_allocator_push()
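bch_allocator_push() above hands each freshly invalidated bucket to the per-purpose free lists in a fixed order: the prio/gen write reserve first (nothing else can make progress if priorities cannot be flushed), then the remaining reserves until one has room. A small sketch of that ordering, with tiny fixed-size arrays standing in for the kernel fifos and plain enum constants standing in for the RESERVE_* indices:

#include <stdbool.h>
#include <stdio.h>

/* Plain stand-ins for the per-purpose reserves. */
enum { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

struct fifo {
	long slot[2];		/* tiny capacity, just for the demo */
	unsigned int used;
};

static bool fifo_push(struct fifo *f, long v)
{
	if (f->used == sizeof(f->slot) / sizeof(f->slot[0]))
		return false;	/* this reserve is already topped up */
	f->slot[f->used++] = v;
	return true;
}

/* Prio/gen reserve first, then whichever other reserve has room. */
static bool allocator_push(struct fifo *freelists, long bucket)
{
	if (fifo_push(&freelists[RESERVE_PRIO], bucket))
		return true;

	for (unsigned int i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&freelists[i], bucket))
			return true;

	return false;	/* everything is full; the allocator thread waits */
}

int main(void)
{
	struct fifo freelists[RESERVE_NR] = { 0 };

	for (long b = 100; b < 110; b++)
		if (!allocator_push(freelists, b))
			printf("no room for bucket %ld yet\n", b);

	printf("prio reserve holds %u buckets\n", freelists[RESERVE_PRIO].used);
	return 0;
}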
347 struct cache *ca = arg; in bch_allocator_thread() local
349 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
360 if (!fifo_pop(&ca->free_inc, bucket)) in bch_allocator_thread()
363 if (ca->discard) { in bch_allocator_thread()
364 mutex_unlock(&ca->set->bucket_lock); in bch_allocator_thread()
365 blkdev_issue_discard(ca->bdev, in bch_allocator_thread()
366 bucket_to_sector(ca->set, bucket), in bch_allocator_thread()
367 ca->sb.bucket_size, GFP_KERNEL); in bch_allocator_thread()
368 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
371 allocator_wait(ca, bch_allocator_push(ca, bucket)); in bch_allocator_thread()
372 wake_up(&ca->set->btree_cache_wait); in bch_allocator_thread()
373 wake_up(&ca->set->bucket_wait); in bch_allocator_thread()
383 allocator_wait(ca, !ca->invalidate_needs_gc); in bch_allocator_thread()
384 invalidate_buckets(ca); in bch_allocator_thread()
390 allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); in bch_allocator_thread()
391 if (CACHE_SYNC(&ca->sb)) { in bch_allocator_thread()
403 if (!fifo_full(&ca->free_inc)) in bch_allocator_thread()
406 if (bch_prio_write(ca, false) < 0) { in bch_allocator_thread()
407 ca->invalidate_needs_gc = 1; in bch_allocator_thread()
408 wake_up_gc(ca->set); in bch_allocator_thread()
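The bch_allocator_thread() hits above are the spine of a producer loop: first drain free_inc into the allocatable free lists (issuing a discard per bucket if enabled, and waking waiters on bucket_wait), then refill free_inc by invalidating more buckets (waiting for GC when needed), and finally, on a CACHE_SYNC cache, write out the new priorities/generations before the buckets may be reused. A single-threaded sketch of that phase ordering; the real thread blocks in allocator_wait() between steps, while here each phase simply runs until it can make no more progress:

#include <stdbool.h>
#include <stdio.h>

#define FREE_INC_CAP 4
#define FREE_CAP     4

struct cache {
	long free_inc[FREE_INC_CAP];	/* invalidated, not yet usable */
	unsigned int free_inc_nr;
	long freelist[FREE_CAP];	/* ready to be allocated */
	unsigned int free_nr;
	long next_bucket;		/* where invalidation scans next */
	bool discard;			/* device discard enabled */
	bool sync;			/* CACHE_SYNC: prios must hit disk */
};

/* Phase 1: move invalidated buckets onto the allocatable free list. */
static void push_free_buckets(struct cache *ca)
{
	while (ca->free_inc_nr && ca->free_nr < FREE_CAP) {
		long b = ca->free_inc[--ca->free_inc_nr];

		if (ca->discard)
			printf("discard bucket %ld\n", b);	/* blkdev_issue_discard() in the kernel */
		ca->freelist[ca->free_nr++] = b;
		/* kernel: wake_up() on bucket_wait/btree_cache_wait here */
	}
}

/* Phase 2: invalidate more buckets to refill free_inc. */
static void invalidate_buckets(struct cache *ca)
{
	while (ca->free_inc_nr < FREE_INC_CAP)
		ca->free_inc[ca->free_inc_nr++] = ca->next_bucket++;
}

/* Phase 3: make the new generations visible on disk before reuse. */
static void write_prios(struct cache *ca)
{
	if (ca->sync)
		printf("bch_prio_write()-style flush before reuse\n");
}

int main(void)
{
	struct cache ca = { .next_bucket = 1, .discard = true, .sync = true };

	for (int round = 0; round < 2; round++) {
		push_free_buckets(&ca);
		invalidate_buckets(&ca);
		write_prios(&ca);
	}
	printf("free list has %u buckets ready\n", ca.free_nr);
	return 0;
}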
419 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait) in bch_bucket_alloc() argument
427 if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags))) in bch_bucket_alloc()
431 if (fifo_pop(&ca->free[RESERVE_NONE], r) || in bch_bucket_alloc()
432 fifo_pop(&ca->free[reserve], r)) in bch_bucket_alloc()
436 trace_bcache_alloc_fail(ca, reserve); in bch_bucket_alloc()
441 prepare_to_wait(&ca->set->bucket_wait, &w, in bch_bucket_alloc()
444 mutex_unlock(&ca->set->bucket_lock); in bch_bucket_alloc()
446 mutex_lock(&ca->set->bucket_lock); in bch_bucket_alloc()
447 } while (!fifo_pop(&ca->free[RESERVE_NONE], r) && in bch_bucket_alloc()
448 !fifo_pop(&ca->free[reserve], r)); in bch_bucket_alloc()
450 finish_wait(&ca->set->bucket_wait, &w); in bch_bucket_alloc()
452 if (ca->alloc_thread) in bch_bucket_alloc()
453 wake_up_process(ca->alloc_thread); in bch_bucket_alloc()
455 trace_bcache_alloc(ca, reserve); in bch_bucket_alloc()
457 if (expensive_debug_checks(ca->set)) { in bch_bucket_alloc()
462 for (iter = 0; iter < prio_buckets(ca) * 2; iter++) in bch_bucket_alloc()
463 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); in bch_bucket_alloc()
466 fifo_for_each(i, &ca->free[j], iter) in bch_bucket_alloc()
468 fifo_for_each(i, &ca->free_inc, iter) in bch_bucket_alloc()
472 b = ca->buckets + r; in bch_bucket_alloc()
476 SET_GC_SECTORS_USED(b, ca->sb.bucket_size); in bch_bucket_alloc()
488 if (ca->set->avail_nbuckets > 0) { in bch_bucket_alloc()
489 ca->set->avail_nbuckets--; in bch_bucket_alloc()
490 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats); in bch_bucket_alloc()
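bch_bucket_alloc() above is the consumer side: the fast path pops a bucket from the shared RESERVE_NONE pool or from the caller's named reserve under bucket_lock, and only when both are empty (and the caller allows waiting) does it sleep on bucket_wait until the allocator thread refills the fifos; on success it wakes the allocator thread, marks the bucket, and accounts one fewer available bucket. A rough sketch of that control flow, with small arrays standing in for the fifos and the sleep reduced to a message:

#include <stdbool.h>
#include <stdio.h>

enum { RESERVE_BTREE, RESERVE_PRIO, RESERVE_MOVINGGC, RESERVE_NONE, RESERVE_NR };

struct pool {
	long bucket[4];
	unsigned int nr;
};

struct cache {
	struct pool freelists[RESERVE_NR];
	bool io_disabled;
	long avail_nbuckets;
};

static bool pool_pop(struct pool *p, long *out)
{
	if (!p->nr)
		return false;
	*out = p->bucket[--p->nr];
	return true;
}

/* Returns a bucket number, or -1 when nothing is available and the
 * caller did not ask to wait (the kernel would sleep on bucket_wait). */
static long bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
	long r;

	if (ca->io_disabled)
		return -1;

	/* Fast path: anything in the shared pool or in our own reserve. */
	if (pool_pop(&ca->freelists[RESERVE_NONE], &r) ||
	    pool_pop(&ca->freelists[reserve], &r))
		goto out;

	if (!wait)
		return -1;

	/* Slow path stand-in: the kernel loops in prepare_to_wait()/
	 * schedule() here until the allocator thread pushes a bucket. */
	fprintf(stderr, "would block on bucket_wait\n");
	return -1;

out:
	/* Kernel: wake the allocator thread, mark the bucket's gc state,
	 * and account one fewer available bucket. */
	if (ca->avail_nbuckets > 0)
		ca->avail_nbuckets--;
	return r;
}

int main(void)
{
	struct cache ca = { .avail_nbuckets = 2 };

	ca.freelists[RESERVE_NONE] = (struct pool){ .bucket = { 7 }, .nr = 1 };
	ca.freelists[RESERVE_BTREE] = (struct pool){ .bucket = { 9 }, .nr = 1 };

	printf("got %ld\n", bucket_alloc(&ca, RESERVE_BTREE, false));	/* 7 from shared */
	printf("got %ld\n", bucket_alloc(&ca, RESERVE_BTREE, false));	/* 9 from btree reserve */
	printf("got %ld\n", bucket_alloc(&ca, RESERVE_BTREE, false));	/* -1, both empty */
	return 0;
}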
496 void __bch_bucket_free(struct cache *ca, struct bucket *b) in __bch_bucket_free() argument
501 if (ca->set->avail_nbuckets < ca->set->nbuckets) { in __bch_bucket_free()
502 ca->set->avail_nbuckets++; in __bch_bucket_free()
503 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats); in __bch_bucket_free()
518 struct cache *ca; in __bch_bucket_alloc_set() local
529 ca = c->cache; in __bch_bucket_alloc_set()
530 b = bch_bucket_alloc(ca, reserve, wait); in __bch_bucket_alloc_set()
534 k->ptr[0] = MAKE_PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
536 ca->sb.nr_this_dev); in __bch_bucket_alloc_set()
750 int bch_cache_allocator_start(struct cache *ca) in bch_cache_allocator_start() argument
753 ca, "bcache_allocator"); in bch_cache_allocator_start()
757 ca->alloc_thread = k; in bch_cache_allocator_start()