Lines matching +full:0 +full:e
(All hits below come from the dm-cache "smq" policy, drivers/md/dm-cache-policy-smq.c; each line shows the source line number, the matching code, and the enclosing function.)

29 return d ? n / d : 0u; in safe_div()
34 return d ? n % d : 0u; in safe_mod()
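
safe_div() and safe_mod() above guard against a zero divisor by returning 0 instead of dividing. A minimal user-space sketch of the same guard; only the two expressions come from the listing, the includes and main() are mine:

#include <stdio.h>

/* Return n / d, or 0 when d is 0, mirroring safe_div() in the listing. */
static unsigned int safe_div(unsigned int n, unsigned int d)
{
	return d ? n / d : 0u;
}

/* Return n % d, or 0 when d is 0, mirroring safe_mod() in the listing. */
static unsigned int safe_mod(unsigned int n, unsigned int d)
{
	return d ? n % d : 0u;
}

int main(void)
{
	printf("%u %u\n", safe_div(10, 3), safe_mod(10, 0));	/* prints: 3 0 */
	return 0;
}
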
70 return 0; in space_init()
78 return 0; in space_init()
88 struct entry *e; in __get_entry() local
90 e = es->begin + block; in __get_entry()
91 BUG_ON(e >= es->end); in __get_entry()
93 return e; in __get_entry()
96 static unsigned int to_index(struct entry_space *es, struct entry *e) in to_index() argument
98 BUG_ON(e < es->begin || e >= es->end); in to_index()
99 return e - es->begin; in to_index()
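
__get_entry() and to_index() convert between an array index and a pointer into a contiguous block of entries, bounds-checked with BUG_ON(). A stand-alone sketch of that conversion; the struct layouts are simplified assumptions and assert() stands in for BUG_ON():

#include <assert.h>

struct entry { unsigned int next, prev; };

/* Contiguous slab of entries; an index is an offset from begin. */
struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
	struct entry *e = es->begin + block;

	assert(e < es->end);	/* the kernel code uses BUG_ON() here */
	return e;
}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{
	assert(e >= es->begin && e < es->end);
	return e - es->begin;
}

int main(void)
{
	struct entry slab[4];
	struct entry_space es = { slab, slab + 4 };

	return to_index(&es, __get_entry(&es, 2)) == 2 ? 0 : 1;
}
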
119 l->nr_elts = 0; in l_init()
133 static struct entry *l_next(struct entry_space *es, struct entry *e) in l_next() argument
135 return to_entry(es, e->next); in l_next()
138 static struct entry *l_prev(struct entry_space *es, struct entry *e) in l_prev() argument
140 return to_entry(es, e->prev); in l_prev()
148 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_head() argument
152 e->next = l->head; in l_add_head()
153 e->prev = INDEXER_NULL; in l_add_head()
156 head->prev = l->head = to_index(es, e); in l_add_head()
158 l->head = l->tail = to_index(es, e); in l_add_head()
160 if (!e->sentinel) in l_add_head()
164 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_tail() argument
168 e->next = INDEXER_NULL; in l_add_tail()
169 e->prev = l->tail; in l_add_tail()
172 tail->next = l->tail = to_index(es, e); in l_add_tail()
174 l->head = l->tail = to_index(es, e); in l_add_tail()
176 if (!e->sentinel) in l_add_tail()
181 struct entry *old, struct entry *e) in l_add_before() argument
186 l_add_head(es, l, e); in l_add_before()
189 e->prev = old->prev; in l_add_before()
190 e->next = to_index(es, old); in l_add_before()
191 prev->next = old->prev = to_index(es, e); in l_add_before()
193 if (!e->sentinel) in l_add_before()
198 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) in l_del() argument
200 struct entry *prev = l_prev(es, e); in l_del()
201 struct entry *next = l_next(es, e); in l_del()
204 prev->next = e->next; in l_del()
206 l->head = e->next; in l_del()
209 next->prev = e->prev; in l_del()
211 l->tail = e->prev; in l_del()
213 if (!e->sentinel) in l_del()
219 struct entry *e; in l_pop_head() local
221 for (e = l_head(es, l); e; e = l_next(es, e)) in l_pop_head()
222 if (!e->sentinel) { in l_pop_head()
223 l_del(es, l, e); in l_pop_head()
224 return e; in l_pop_head()
232 struct entry *e; in l_pop_tail() local
234 for (e = l_tail(es, l); e; e = l_prev(es, e)) in l_pop_tail()
235 if (!e->sentinel) { in l_pop_tail()
236 l_del(es, l, e); in l_pop_tail()
237 return e; in l_pop_tail()
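
The l_* hits (lines 133-237) describe a doubly linked list that links entries by array index instead of pointer: INDEXER_NULL terminates a chain, and sentinel entries are excluded from nr_elts. The following is my reconstruction of the head-insert and delete paths under those assumptions; the INDEXER_NULL value and field widths are guesses, not the original definitions:

#include <assert.h>
#include <stddef.h>

#define INDEXER_NULL ((unsigned int)-1)	/* assumed end-of-chain marker */

struct entry {
	unsigned int next, prev;
	int sentinel;
};

struct entry_space { struct entry *begin, *end; };
struct ilist { unsigned int nr_elts, head, tail; };

static struct entry *to_entry(struct entry_space *es, unsigned int i)
{
	return (i == INDEXER_NULL) ? NULL : es->begin + i;
}

static unsigned int to_index(struct entry_space *es, struct entry *e)
{
	return e - es->begin;
}

static void l_init(struct ilist *l)
{
	l->nr_elts = 0;
	l->head = l->tail = INDEXER_NULL;
}

/* Insert e at the head; only non-sentinel entries count towards nr_elts. */
static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *head = to_entry(es, l->head);

	e->next = l->head;
	e->prev = INDEXER_NULL;

	if (head)
		head->prev = l->head = to_index(es, e);
	else
		l->head = l->tail = to_index(es, e);

	if (!e->sentinel)
		l->nr_elts++;
}

/* Unlink e, patching its neighbours or the list ends as needed. */
static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
	struct entry *prev = to_entry(es, e->prev);
	struct entry *next = to_entry(es, e->next);

	if (prev)
		prev->next = e->next;
	else
		l->head = e->next;

	if (next)
		next->prev = e->prev;
	else
		l->tail = e->prev;

	if (!e->sentinel)
		l->nr_elts--;
}

int main(void)
{
	struct entry slab[2] = {{0}};
	struct entry_space es = { slab, slab + 2 };
	struct ilist l;

	l_init(&l);
	l_add_head(&es, &l, &slab[0]);
	l_add_head(&es, &l, &slab[1]);	/* list is now 1 -> 0 */
	l_del(&es, &l, &slab[0]);
	assert(l.nr_elts == 1 && l.head == 1 && l.tail == 1);
	return 0;
}
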
275 q->nr_elts = 0; in q_init()
278 for (i = 0; i < q->nr_levels; i++) { in q_init()
280 q->target_count[i] = 0u; in q_init()
283 q->last_target_nr_elts = 0u; in q_init()
284 q->nr_top_levels = 0u; in q_init()
285 q->nr_in_top_levels = 0u; in q_init()
296 static void q_push(struct queue *q, struct entry *e) in q_push() argument
298 BUG_ON(e->pending_work); in q_push()
300 if (!e->sentinel) in q_push()
303 l_add_tail(q->es, q->qs + e->level, e); in q_push()
306 static void q_push_front(struct queue *q, struct entry *e) in q_push_front() argument
308 BUG_ON(e->pending_work); in q_push_front()
310 if (!e->sentinel) in q_push_front()
313 l_add_head(q->es, q->qs + e->level, e); in q_push_front()
316 static void q_push_before(struct queue *q, struct entry *old, struct entry *e) in q_push_before() argument
318 BUG_ON(e->pending_work); in q_push_before()
320 if (!e->sentinel) in q_push_before()
323 l_add_before(q->es, q->qs + e->level, old, e); in q_push_before()
326 static void q_del(struct queue *q, struct entry *e) in q_del() argument
328 l_del(q->es, q->qs + e->level, e); in q_del()
329 if (!e->sentinel) in q_del()
339 struct entry *e; in q_peek() local
343 for (level = 0; level < max_level; level++) in q_peek()
344 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { in q_peek()
345 if (e->sentinel) { in q_peek()
352 return e; in q_peek()
360 struct entry *e = q_peek(q, q->nr_levels, true); in q_pop() local
362 if (e) in q_pop()
363 q_del(q, e); in q_pop()
365 return e; in q_pop()
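
q_peek() scans the levels from 0 upwards and returns the first non-sentinel entry, and q_pop() deletes whatever q_peek() found, which makes level 0 the eviction end of the queue. A toy illustration of that scan; it uses plain pointer lists and one static array, and ignores the sentinel-crossing logic visible at line 345:

#include <stddef.h>
#include <stdio.h>

#define NR_LEVELS 4

struct toy_entry {
	struct toy_entry *next;
	int sentinel;
	int id;
};

/* One list per level; level 0 holds the coldest entries. */
static struct toy_entry *levels[NR_LEVELS];

/* Return the first non-sentinel entry in the lowest populated level. */
static struct toy_entry *q_peek(unsigned int max_level)
{
	for (unsigned int level = 0; level < max_level; level++)
		for (struct toy_entry *e = levels[level]; e; e = e->next)
			if (!e->sentinel)
				return e;
	return NULL;
}

int main(void)
{
	struct toy_entry sentinel = { NULL, 1, -1 };
	struct toy_entry hot = { NULL, 0, 7 };
	struct toy_entry cold = { &sentinel, 0, 3 };

	levels[2] = &hot;
	levels[0] = &cold;

	struct toy_entry *hit = q_peek(NR_LEVELS);

	printf("peek -> id %d\n", hit ? hit->id : -1);	/* 3: lowest level wins */
	return 0;
}
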
375 struct entry *e; in __redist_pop_from() local
378 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) in __redist_pop_from()
379 if (!e->sentinel) { in __redist_pop_from()
380 l_del(q->es, q->qs + e->level, e); in __redist_pop_from()
381 return e; in __redist_pop_from()
415 q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels); in q_set_targets()
423 0, q->nr_levels - q->nr_top_levels); in q_set_targets()
425 q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels); in q_set_targets()
433 struct entry *e; in q_redistribute() local
437 for (level = 0u; level < q->nr_levels - 1u; level++) { in q_redistribute()
445 e = __redist_pop_from(q, level + 1u); in q_redistribute()
446 if (!e) { in q_redistribute()
451 e->level = level; in q_redistribute()
452 l_add_tail(q->es, l, e); in q_redistribute()
460 e = l_pop_tail(q->es, l); in q_redistribute()
462 if (!e) in q_redistribute()
466 e->level = level + 1u; in q_redistribute()
467 l_add_tail(q->es, l_above, e); in q_redistribute()
472 static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels, in q_requeue() argument
476 unsigned int sentinels_passed = 0; in q_requeue()
477 unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels); in q_requeue()
480 if (extra_levels && (e->level < q->nr_levels - 1u)) { in q_requeue()
486 de->level = e->level; in q_requeue()
489 case 0: in q_requeue()
505 q_del(q, e); in q_requeue()
506 e->level = new_level; in q_requeue()
507 q_push(q, e); in q_requeue()
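
q_requeue() lifts an entry by extra_levels but clamps the result at the top level (line 477: min(q->nr_levels - 1u, e->level + extra_levels)). A one-function sketch of just that clamping, leaving out the displaced-entry and sentinel bookkeeping in the surrounding lines:

#include <assert.h>

/* Clamp promotion so an entry never rises past level nr_levels - 1. */
static unsigned int requeue_level(unsigned int nr_levels,
				  unsigned int cur_level,
				  unsigned int extra_levels)
{
	unsigned int new_level = cur_level + extra_levels;

	return new_level < nr_levels - 1u ? new_level : nr_levels - 1u;
}

int main(void)
{
	assert(requeue_level(64, 10, 1) == 11);	/* normal promotion */
	assert(requeue_level(64, 62, 5) == 63);	/* clamped at the top level */
	return 0;
}
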
531 s->hits = 0u; in stats_init()
532 s->misses = 0u; in stats_init()
537 s->hits = s->misses = 0u; in stats_reset()
597 for (i = 0; i < nr_buckets; i++) in h_init()
600 return 0; in h_init()
613 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e) in h_next() argument
615 return to_entry(ht->es, e->hash_next); in h_next()
618 static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e) in __h_insert() argument
620 e->hash_next = ht->buckets[bucket]; in __h_insert()
621 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
624 static void h_insert(struct smq_hash_table *ht, struct entry *e) in h_insert() argument
626 unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_insert()
628 __h_insert(ht, h, e); in h_insert()
634 struct entry *e; in __h_lookup() local
637 for (e = h_head(ht, h); e; e = h_next(ht, e)) { in __h_lookup()
638 if (e->oblock == oblock) in __h_lookup()
639 return e; in __h_lookup()
641 *prev = e; in __h_lookup()
648 struct entry *e, struct entry *prev) in __h_unlink() argument
651 prev->hash_next = e->hash_next; in __h_unlink()
653 ht->buckets[h] = e->hash_next; in __h_unlink()
661 struct entry *e, *prev; in h_lookup() local
664 e = __h_lookup(ht, h, oblock, &prev); in h_lookup()
665 if (e && prev) { in h_lookup()
670 __h_unlink(ht, h, e, prev); in h_lookup()
671 __h_insert(ht, h, e); in h_lookup()
674 return e; in h_lookup()
677 static void h_remove(struct smq_hash_table *ht, struct entry *e) in h_remove() argument
679 unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_remove()
686 e = __h_lookup(ht, h, e->oblock, &prev); in h_remove()
687 if (e) in h_remove()
688 __h_unlink(ht, h, e, prev); in h_remove()
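
The hash-table hits (lines 597-688) show open hashing over entry indices, and h_lookup() relinks a found entry to the head of its bucket (__h_unlink() followed by __h_insert()), so recently queried blocks stay near the front of the chain. A sketch of that move-to-front lookup using plain pointers and a toy hash; hash_oblock() and the node layout are assumptions, not the original code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_BUCKETS 16u

struct node {
	struct node *hash_next;
	uint64_t oblock;
};

static struct node *buckets[NR_BUCKETS];

static unsigned int hash_oblock(uint64_t oblock)
{
	return (unsigned int)((oblock * 2654435769u) % NR_BUCKETS);	/* toy hash */
}

static void h_insert(struct node *n)
{
	unsigned int h = hash_oblock(n->oblock);

	n->hash_next = buckets[h];
	buckets[h] = n;
}

/* Find oblock; on a hit, move the node to the bucket head (move-to-front). */
static struct node *h_lookup(uint64_t oblock)
{
	unsigned int h = hash_oblock(oblock);
	struct node *prev = NULL;

	for (struct node *n = buckets[h]; n; prev = n, n = n->hash_next) {
		if (n->oblock == oblock) {
			if (prev) {
				prev->hash_next = n->hash_next;
				n->hash_next = buckets[h];
				buckets[h] = n;
			}
			return n;
		}
	}
	return NULL;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { NULL, 17 };

	h_insert(&a);
	h_insert(&b);
	printf("%p %p\n", (void *)h_lookup(1), (void *)h_lookup(42));
	return 0;
}
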
707 ea->nr_allocated = 0u; in init_allocator()
715 static void init_entry(struct entry *e) in init_entry() argument
721 e->hash_next = INDEXER_NULL; in init_entry()
722 e->next = INDEXER_NULL; in init_entry()
723 e->prev = INDEXER_NULL; in init_entry()
724 e->level = 0u; in init_entry()
725 e->dirty = true; /* FIXME: audit */ in init_entry()
726 e->allocated = true; in init_entry()
727 e->sentinel = false; in init_entry()
728 e->pending_work = false; in init_entry()
733 struct entry *e; in alloc_entry() local
738 e = l_pop_head(ea->es, &ea->free); in alloc_entry()
739 init_entry(e); in alloc_entry()
742 return e; in alloc_entry()
750 struct entry *e = __get_entry(ea->es, ea->begin + i); in alloc_particular_entry() local
752 BUG_ON(e->allocated); in alloc_particular_entry()
754 l_del(ea->es, &ea->free, e); in alloc_particular_entry()
755 init_entry(e); in alloc_particular_entry()
758 return e; in alloc_particular_entry()
761 static void free_entry(struct entry_alloc *ea, struct entry *e) in free_entry() argument
764 BUG_ON(!e->allocated); in free_entry()
767 e->allocated = false; in free_entry()
768 l_add_tail(ea->es, &ea->free, e); in free_entry()
776 static unsigned int get_index(struct entry_alloc *ea, struct entry *e) in get_index() argument
778 return to_index(ea->es, e) - ea->begin; in get_index()
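
The allocator hits (lines 707-778) show entries being handed out from a free list, a specific slot being claimable with alloc_particular_entry(), and get_index() reporting an entry's offset from the allocator's first slot. A simplified sketch of those three operations; it replaces the indexed free list with a plain stack, so the internals differ from the original:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define NR_ENTRIES 8u

struct entry {
	bool allocated;
};

struct entry_alloc {
	struct entry slots[NR_ENTRIES];
	unsigned int free_stack[NR_ENTRIES];	/* indices of free slots */
	unsigned int nr_free;
	unsigned int nr_allocated;
};

static void init_allocator(struct entry_alloc *ea)
{
	ea->nr_allocated = 0u;
	ea->nr_free = NR_ENTRIES;
	for (unsigned int i = 0; i < NR_ENTRIES; i++) {
		ea->slots[i].allocated = false;
		ea->free_stack[i] = i;
	}
}

/* Hand out any free slot, or NULL when the allocator is exhausted. */
static struct entry *alloc_entry(struct entry_alloc *ea)
{
	if (!ea->nr_free)
		return NULL;

	struct entry *e = &ea->slots[ea->free_stack[--ea->nr_free]];

	e->allocated = true;
	ea->nr_allocated++;
	return e;
}

/* Claim one particular slot, e.g. when reloading a known cache block. */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
	struct entry *e = &ea->slots[i];

	assert(!e->allocated);
	for (unsigned int j = 0; j < ea->nr_free; j++)
		if (ea->free_stack[j] == i) {
			ea->free_stack[j] = ea->free_stack[--ea->nr_free];
			break;
		}
	e->allocated = true;
	ea->nr_allocated++;
	return e;
}

/* An entry's index is just its offset from the first slot. */
static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
	return (unsigned int)(e - ea->slots);
}

int main(void)
{
	struct entry_alloc ea;

	init_allocator(&ea);
	struct entry *e = alloc_particular_entry(&ea, 3);

	assert(get_index(&ea, e) == 3);
	assert(alloc_entry(&ea) != e);	/* slot 3 is no longer on the free list */
	return 0;
}
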
892 for (level = 0; level < q->nr_levels; level++) { in __update_writeback_sentinels()
905 for (level = 0; level < q->nr_levels; level++) { in __update_demote_sentinels()
932 for (level = 0; level < NR_CACHE_LEVELS; level++) { in __sentinels_init()
959 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
961 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
964 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
966 if (e->dirty) in push_queue()
967 q_push(&mq->dirty, e); in push_queue()
969 q_push(&mq->clean, e); in push_queue()
973 static void push(struct smq_policy *mq, struct entry *e) in push() argument
975 h_insert(&mq->table, e); in push()
976 if (!e->pending_work) in push()
977 push_queue(mq, e); in push()
980 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
982 if (e->dirty) in push_queue_front()
983 q_push_front(&mq->dirty, e); in push_queue_front()
985 q_push_front(&mq->clean, e); in push_queue_front()
988 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
990 h_insert(&mq->table, e); in push_front()
991 if (!e->pending_work) in push_front()
992 push_queue_front(mq, e); in push_front()
995 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
997 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
1000 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
1005 if (e->pending_work) in requeue()
1008 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1009 if (!e->dirty) { in requeue()
1010 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
1014 q_requeue(&mq->dirty, e, 1u, in requeue()
1015 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
1016 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
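
requeue() only promotes an entry on the first hit in a period: test_and_set_bit() on cache_hit_bits (line 1008) turns later hits into no-ops until the bits are cleared. A user-space sketch of that once-per-period gate with a plain bitmap; the kernel helper is atomic, this stand-in is not:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_BLOCKS 256u
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long hit_bits[NR_BLOCKS / BITS_PER_LONG];

/* Set bit nr and report whether it was already set (cf. test_and_set_bit()). */
static bool test_and_set(unsigned int nr)
{
	unsigned long *word = &hit_bits[nr / BITS_PER_LONG];
	unsigned long mask = 1ul << (nr % BITS_PER_LONG);
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

int main(void)
{
	unsigned int cblock = 42;

	if (!test_and_set(cblock))
		printf("first hit: requeue block %u one level up\n", cblock);
	if (!test_and_set(cblock))
		printf("never printed: later hits skip the requeue\n");
	return 0;
}
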
1151 return q_size(&mq->dirty) == 0u; in clean_target_met()
1171 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1173 BUG_ON(e->sentinel); in mark_pending()
1174 BUG_ON(!e->allocated); in mark_pending()
1175 BUG_ON(e->pending_work); in mark_pending()
1176 e->pending_work = true; in mark_pending()
1179 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1181 BUG_ON(!e->pending_work); in clear_pending()
1182 e->pending_work = false; in clear_pending()
1189 struct entry *e; in queue_writeback() local
1191 e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); in queue_writeback()
1192 if (e) { in queue_writeback()
1193 mark_pending(mq, e); in queue_writeback()
1194 q_del(&mq->dirty, e); in queue_writeback()
1197 work.oblock = e->oblock; in queue_writeback()
1198 work.cblock = infer_cblock(mq, e); in queue_writeback()
1202 clear_pending(mq, e); in queue_writeback()
1203 q_push_front(&mq->dirty, e); in queue_writeback()
1212 struct entry *e; in queue_demotion() local
1217 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1218 if (!e) { in queue_demotion()
1224 mark_pending(mq, e); in queue_demotion()
1225 q_del(&mq->clean, e); in queue_demotion()
1228 work.oblock = e->oblock; in queue_demotion()
1229 work.cblock = infer_cblock(mq, e); in queue_demotion()
1232 clear_pending(mq, e); in queue_demotion()
1233 q_push_front(&mq->clean, e); in queue_demotion()
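
queue_writeback() and queue_demotion() share one shape: peek an entry, mark it pending, drop it from its queue, try to queue the background work, and on failure undo both steps and push the entry back at the front. A compact sketch of that undo-on-failure flow; submit_background_work() is a stub I introduced in place of the real background-work queueing:

#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool pending_work;
	bool queued;
};

/* Stand-in for the background-work queueing; flip the return value to
 * exercise the success path instead. */
static bool submit_background_work(struct entry *e)
{
	(void)e;
	return false;	/* pretend the tracker is full */
}

static void q_del(struct entry *e)        { e->queued = false; }
static void q_push_front(struct entry *e) { e->queued = true; }

static void queue_demotion(struct entry *e)
{
	e->pending_work = true;	/* mark_pending() */
	q_del(e);

	if (!submit_background_work(e)) {
		/* Undo: the entry goes back where the policy sees it first. */
		e->pending_work = false;	/* clear_pending() */
		q_push_front(e);
	}
}

int main(void)
{
	struct entry e = { false, true };

	queue_demotion(&e);
	printf("pending=%d queued=%d\n", e.pending_work, e.queued);	/* 0 1 */
	return 0;
}
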
1241 struct entry *e; in queue_promotion() local
1264 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1265 BUG_ON(!e); in queue_promotion()
1266 e->pending_work = true; in queue_promotion()
1269 work.cblock = infer_cblock(mq, e); in queue_promotion()
1272 free_entry(&mq->cache_alloc, e); in queue_promotion()
1314 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue() local
1316 if (e) { in update_hotspot_queue()
1317 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1319 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1320 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1322 0u : mq->hotspot_level_jump, in update_hotspot_queue()
1328 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1329 if (!e) { in update_hotspot_queue()
1330 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1331 if (e) { in update_hotspot_queue()
1332 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1333 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1339 if (e) { in update_hotspot_queue()
1340 e->oblock = hb; in update_hotspot_queue()
1341 q_push(&mq->hotspot, e); in update_hotspot_queue()
1342 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
1346 return e; in update_hotspot_queue()
1380 struct entry *e, *hs_e; in __lookup() local
1385 e = h_lookup(&mq->table, oblock); in __lookup()
1386 if (e) { in __lookup()
1387 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1389 requeue(mq, e); in __lookup()
1390 *cblock = infer_cblock(mq, e); in __lookup()
1391 return 0; in __lookup()
1473 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work() local
1479 clear_pending(mq, e); in __complete_background_work()
1481 e->oblock = work->oblock; in __complete_background_work()
1482 e->level = NR_CACHE_LEVELS - 1; in __complete_background_work()
1483 push(mq, e); in __complete_background_work()
1486 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1494 h_remove(&mq->table, e); in __complete_background_work()
1495 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1498 clear_pending(mq, e); in __complete_background_work()
1499 push_queue(mq, e); in __complete_background_work()
1506 clear_pending(mq, e); in __complete_background_work()
1507 push_queue(mq, e); in __complete_background_work()
1530 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty() local
1532 if (e->pending_work) in __smq_set_clear_dirty()
1533 e->dirty = set; in __smq_set_clear_dirty()
1535 del_queue(mq, e); in __smq_set_clear_dirty()
1536 e->dirty = set; in __smq_set_clear_dirty()
1537 push_queue(mq, e); in __smq_set_clear_dirty()
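
__smq_set_clear_dirty() moves an entry between the dirty and clean queues when its dirty bit changes, unless background work is pending, in which case only the flag is updated; del_queue()/push_queue() (lines 959-969) pick the queue from e->dirty. A sketch of that flow with the two queues reduced to counters:

#include <assert.h>
#include <stdbool.h>

struct entry { bool dirty, pending_work; };

static unsigned int nr_dirty, nr_clean;

static void del_queue(struct entry *e)  { *(e->dirty ? &nr_dirty : &nr_clean) -= 1; }
static void push_queue(struct entry *e) { *(e->dirty ? &nr_dirty : &nr_clean) += 1; }

static void set_clear_dirty(struct entry *e, bool set)
{
	if (e->pending_work) {
		e->dirty = set;		/* not on any queue; just record the state */
	} else {
		del_queue(e);		/* leave the old queue ... */
		e->dirty = set;
		push_queue(e);		/* ... and join the one matching the new state */
	}
}

int main(void)
{
	struct entry e = { .dirty = false, .pending_work = false };

	nr_clean = 1;	/* e starts on the clean queue */
	set_clear_dirty(&e, true);
	assert(nr_clean == 0 && nr_dirty == 1);
	return 0;
}
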
1571 struct entry *e; in smq_load_mapping() local
1573 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1574 e->oblock = oblock; in smq_load_mapping()
1575 e->dirty = dirty; in smq_load_mapping()
1576 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock); in smq_load_mapping()
1577 e->pending_work = false; in smq_load_mapping()
1583 push_front(mq, e); in smq_load_mapping()
1585 return 0; in smq_load_mapping()
1591 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping() local
1593 if (!e->allocated) in smq_invalidate_mapping()
1597 del_queue(mq, e); in smq_invalidate_mapping()
1598 h_remove(&mq->table, e); in smq_invalidate_mapping()
1599 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1600 return 0; in smq_invalidate_mapping()
1606 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint() local
1608 if (!e->allocated) in smq_get_hint()
1609 return 0; in smq_get_hint()
1611 return e->level; in smq_get_hint()
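
smq_load_mapping() and smq_get_hint() round-trip an entry's level through the on-disk hint: the hint handed back is e->level, and on load a valid hint is clamped to NR_CACHE_LEVELS - 1 while an invalid one falls back to random_level(). A sketch of that round trip; random_level() here is a trivial stand-in for the original:

#include <assert.h>
#include <stdint.h>

#define NR_CACHE_LEVELS 64u

/* Stand-in for random_level(): any cheap deterministic spread will do here. */
static unsigned int random_level(uint32_t cblock)
{
	return (cblock * 2654435761u) % NR_CACHE_LEVELS;
}

static unsigned int load_level(uint32_t hint, int hint_valid, uint32_t cblock)
{
	if (hint_valid)
		return hint < NR_CACHE_LEVELS - 1u ? hint : NR_CACHE_LEVELS - 1u;
	return random_level(cblock);
}

int main(void)
{
	assert(load_level(200, 1, 0) == NR_CACHE_LEVELS - 1u);	/* clamped */
	assert(load_level(0, 0, 5) < NR_CACHE_LEVELS);		/* random fallback */
	return 0;
}
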
1666 return 0; in mq_set_config_value()
1677 DMEMIT("10 random_threshold 0 " in mq_emit_config_values()
1678 "sequential_threshold 0 " in mq_emit_config_values()
1679 "discard_promote_adjustment 0 " in mq_emit_config_values()
1680 "read_promote_adjustment 0 " in mq_emit_config_values()
1681 "write_promote_adjustment 0 "); in mq_emit_config_values()
1684 return 0; in mq_emit_config_values()
1757 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue); in __smq_create()
1758 for (i = 0; i < nr_sentinels_per_queue; i++) in __smq_create()
1762 for (i = 0; i < nr_sentinels_per_queue; i++) in __smq_create()
1789 mq->tick = 0; in __smq_create()
1868 .version = {2, 0, 0},
1876 .version = {2, 0, 0},
1884 .version = {2, 0, 0},
1892 .version = {2, 0, 0},
1927 return 0; in smq_init()