Lines Matching +full:current +full:-rotate (kernel/bpf/bpf_lru_list.c)

1 // SPDX-License-Identifier: GPL-2.0-only
17 #define LOCAL_LIST_IDX(t) ((t) - BPF_LOCAL_LIST_T_OFFSET)
33 return &loc_l->lists[LOCAL_FREE_LIST_IDX]; in local_free_list()
38 return &loc_l->lists[LOCAL_PENDING_LIST_IDX]; in local_pending_list()
44 return READ_ONCE(node->ref); in bpf_lru_node_is_ref()
49 WRITE_ONCE(node->ref, 0); in bpf_lru_node_clear_ref()
56 l->counts[type]++; in bpf_lru_list_count_inc()
63 l->counts[type]--; in bpf_lru_list_count_dec()
71 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type))) in __bpf_lru_node_move_to_free()
77 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move_to_free()
78 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move_to_free()
80 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move_to_free()
82 node->type = tgt_free_type; in __bpf_lru_node_move_to_free()
83 list_move(&node->list, free_list); in __bpf_lru_node_move_to_free()
91 if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) || in __bpf_lru_node_move_in()
96 node->type = tgt_type; in __bpf_lru_node_move_in()
98 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move_in()
109 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) || in __bpf_lru_node_move()
113 if (node->type != tgt_type) { in __bpf_lru_node_move()
114 bpf_lru_list_count_dec(l, node->type); in __bpf_lru_node_move()
116 node->type = tgt_type; in __bpf_lru_node_move()
123 if (&node->list == l->next_inactive_rotation) in __bpf_lru_node_move()
124 l->next_inactive_rotation = l->next_inactive_rotation->prev; in __bpf_lru_node_move()
126 list_move(&node->list, &l->lists[tgt_type]); in __bpf_lru_node_move()
131 return l->counts[BPF_LRU_LIST_T_INACTIVE] < in bpf_lru_list_inactive_low()
132 l->counts[BPF_LRU_LIST_T_ACTIVE]; in bpf_lru_list_inactive_low()
135 /* Rotate the active list:
147 struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_rotate_active()
158 if (++i == lru->nr_scans || node == first_node) in __bpf_lru_list_rotate_active()
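The truncated comment at file line 135 introduces the pass that the bound at line 158 belongs to: the active list is scanned from its tail, referenced nodes are rotated back to the front with their ref bit cleared (a second chance), unreferenced nodes are demoted to the inactive list, and the scan stops after lru->nr_scans nodes or once the original front node has been seen. Below is a minimal userspace sketch of that policy; toy_node, rotate_active and the array-backed lists are invented stand-ins, not the kernel's struct bpf_lru_node / struct list_head machinery, and all locking is omitted.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_SCANS 4                      /* stand-in for lru->nr_scans */

struct toy_node {
    int key;
    bool ref;
};

/* Move the entry at index pos to the front of the array (index 0 = MRU). */
static void move_to_front(struct toy_node *list, int pos)
{
    struct toy_node tmp = list[pos];

    memmove(&list[1], &list[0], pos * sizeof(tmp));
    list[0] = tmp;
}

/*
 * Scan from the tail of the active array: referenced nodes get a second
 * chance (rotated to the front with the ref bit cleared), unreferenced
 * nodes are demoted to the inactive array.  The scan stops after NR_SCANS
 * nodes or once the node that was originally at the front has been seen.
 */
static void rotate_active(struct toy_node *active, int *nr_active,
                          struct toy_node *inactive, int *nr_inactive)
{
    int first_key = *nr_active ? active[0].key : -1;
    int scans = 0;

    while (*nr_active) {
        int tail = *nr_active - 1;
        bool was_first = active[tail].key == first_key;

        if (active[tail].ref) {
            active[tail].ref = false;            /* second chance */
            move_to_front(active, tail);
        } else {
            inactive[(*nr_inactive)++] = active[tail];
            (*nr_active)--;                      /* demote */
        }

        if (++scans == NR_SCANS || was_first)
            break;
    }
}

int main(void)
{
    struct toy_node active[8] = {
        {1, true}, {2, false}, {3, true}, {4, false}, {5, false},
    };
    struct toy_node inactive[8];
    int nr_active = 5, nr_inactive = 0;

    rotate_active(active, &nr_active, inactive, &nr_inactive);

    for (int i = 0; i < nr_active; i++)
        printf("active: key %d ref=%d\n", active[i].key, active[i].ref);
    for (int i = 0; i < nr_inactive; i++)
        printf("inactive: key %d\n", inactive[i].key);
    return 0;
}

With the sample contents it demotes keys 5, 4 and 2, rotates key 3 to the front with its ref bit cleared, and leaves key 1 untouched because the scan budget runs out first.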
163 /* Rotate the inactive list. It starts from the next_inactive_rotation
167 * at its current location (i.e. do nothing) so that it can
174 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_rotate_inactive()
182 last = l->next_inactive_rotation->next; in __bpf_lru_list_rotate_inactive()
184 last = last->next; in __bpf_lru_list_rotate_inactive()
186 cur = l->next_inactive_rotation; in __bpf_lru_list_rotate_inactive()
187 while (i < lru->nr_scans) { in __bpf_lru_list_rotate_inactive()
189 cur = cur->prev; in __bpf_lru_list_rotate_inactive()
194 next = cur->prev; in __bpf_lru_list_rotate_inactive()
203 l->next_inactive_rotation = next; in __bpf_lru_list_rotate_inactive()
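The comment at file lines 163-167 describes what makes the inactive pass different: it resumes from l->next_inactive_rotation rather than from the head, promotes referenced nodes to the active list (clearing their ref bit), and deliberately leaves unreferenced nodes where they are so the shrink pass can reclaim them. A rough sketch of that resumable, bounded scan, assuming a toy ring of slots in place of the kernel's circular list (toy_node, rotate_inactive and the promoted flag are illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define NR_SCANS 3                      /* stand-in for lru->nr_scans */

struct toy_node {
    int key;
    bool ref;
    bool promoted;                      /* stands in for "moved to the active list" */
};

/*
 * Resumable, bounded scan of the inactive ring.  *cursor plays the role of
 * l->next_inactive_rotation: the next call picks up where this one stopped.
 * Referenced nodes are promoted (with the ref bit cleared); unreferenced
 * nodes are left untouched so a shrink pass can reclaim them later.
 */
static void rotate_inactive(struct toy_node *ring, unsigned int n,
                            unsigned int *cursor)
{
    unsigned int i = n ? *cursor % n : 0;

    for (unsigned int scans = 0; n && scans < NR_SCANS; scans++) {
        struct toy_node *node = &ring[i];

        if (node->ref && !node->promoted) {
            node->ref = false;          /* cleared on promotion */
            node->promoted = true;
        }
        i = (i + 1) % n;                /* walk on toward older nodes */
    }

    *cursor = i;                        /* resume point for the next call */
}

int main(void)
{
    struct toy_node ring[] = {
        {1, true}, {2, false}, {3, false}, {4, true}, {5, false},
    };
    unsigned int cursor = 0;

    rotate_inactive(ring, 5, &cursor);  /* looks at keys 1, 2, 3 */
    rotate_inactive(ring, 5, &cursor);  /* looks at keys 4, 5, 1 */

    for (int i = 0; i < 5; i++)
        printf("key %d: ref=%d promoted=%d\n",
               ring[i].key, ring[i].ref, ring[i].promoted);
    return 0;
}

The second call picks up at the slot where the first one stopped, which is what keeps repeated rotations from rescanning the same few nodes while the rest of the list goes stale.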
217 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink_inactive()
225 } else if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink_inactive()
232 if (++i == lru->nr_scans) in __bpf_lru_list_shrink_inactive()
239 /* 1. Rotate the active list (if needed)
240 * 2. Always rotate the inactive list
251 * ref-bit-cleared nodes and move them to the designated
257 * honoring the ref-bit. It prefers inactive list to active
277 if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE])) in __bpf_lru_list_shrink()
278 force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in __bpf_lru_list_shrink()
280 force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE]; in __bpf_lru_list_shrink()
284 if (lru->del_from_htab(lru->del_arg, node)) { in __bpf_lru_list_shrink()
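The comment fragments at file lines 239-257 spell out the reclaim sequence the code above implements: rotate first (the active list only when the inactive list is low, the inactive list always), then move only ref-cleared nodes from the inactive list to the free list, and if that yields nothing do a force shrink that ignores the ref bit, preferring the inactive list over the active one. The sketch below models just that control flow with per-list counters; it is a deliberately crude toy (no ordering, no nr_scans bound, and the del_from_htab() callback that must succeed before a node may really be freed is assumed to always succeed).

#include <stdio.h>

/*
 * Toy model: each list is just a node count plus a count of nodes whose
 * ref bit is set.  Node ordering and the nr_scans bound are ignored.
 */
struct toy_lru {
    unsigned int active, active_ref;
    unsigned int inactive, inactive_ref;
    unsigned int free;
};

/*
 * 1. Rotate the active list only when the inactive list is low
 *    (the same test as bpf_lru_list_inactive_low(): inactive < active);
 * 2. always rotate the inactive list.
 */
static void rotate(struct toy_lru *l)
{
    if (l->inactive < l->active) {
        /* unreferenced active nodes are demoted to the inactive list */
        unsigned int demote = l->active - l->active_ref;

        l->active -= demote;
        l->inactive += demote;
        l->active_ref = 0;              /* survivors lose their ref bit */
    }

    /* referenced inactive nodes are promoted back to the active list */
    l->active += l->inactive_ref;
    l->inactive -= l->inactive_ref;
    l->inactive_ref = 0;
}

/*
 * 3. Reclaim up to tgt unreferenced inactive nodes;
 * 4. otherwise force-shrink a single node while ignoring the ref bit,
 *    preferring the inactive list over the active list.
 */
static unsigned int shrink(struct toy_lru *l, unsigned int tgt)
{
    unsigned int candidates, freed;

    rotate(l);

    candidates = l->inactive - l->inactive_ref;
    freed = candidates < tgt ? candidates : tgt;
    l->inactive -= freed;
    l->free += freed;
    if (freed)
        return freed;

    if (l->inactive) {
        l->inactive--;
        l->inactive_ref--;              /* every remaining inactive node was referenced */
    } else if (l->active) {
        l->active--;
        if (l->active_ref)
            l->active_ref--;
    } else {
        return 0;                       /* nothing left to reclaim */
    }
    l->free++;
    return 1;
}

int main(void)
{
    struct toy_lru l = { .active = 6, .active_ref = 6,
                         .inactive = 2, .inactive_ref = 2 };
    unsigned int freed = shrink(&l, 4);

    printf("freed %u, free list now holds %u\n", freed, l.free);
    return 0;
}

With the sample counts every node has its ref bit set, so the ref-honoring pass frees nothing and the force-shrink fallback reclaims exactly one node, taken from the active list because the inactive list is empty after rotation.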
315 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type))) in bpf_lru_list_push_free()
318 raw_spin_lock_irqsave(&l->lock, flags); in bpf_lru_list_push_free()
320 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_lru_list_push_free()
326 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_lru_list_pop_free_to_local()
330 raw_spin_lock(&l->lock); in bpf_lru_list_pop_free_to_local()
336 list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE], in bpf_lru_list_pop_free_to_local()
345 __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, in bpf_lru_list_pop_free_to_local()
349 raw_spin_unlock(&l->lock); in bpf_lru_list_pop_free_to_local()
358 *(u32 *)((void *)node + lru->hash_offset) = hash; in __local_list_add_pending()
359 node->cpu = cpu; in __local_list_add_pending()
360 node->type = BPF_LRU_LOCAL_LIST_T_PENDING; in __local_list_add_pending()
362 list_add(&node->list, local_pending_list(loc_l)); in __local_list_add_pending()
374 list_del(&node->list); in __local_list_pop_free()
390 lru->del_from_htab(lru->del_arg, node)) { in __local_list_pop_pending()
391 list_del(&node->list); in __local_list_pop_pending()
413 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_pop_free()
415 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_pop_free()
419 free_list = &l->lists[BPF_LRU_LIST_T_FREE]; in bpf_percpu_lru_pop_free()
426 *(u32 *)((void *)node + lru->hash_offset) = hash; in bpf_percpu_lru_pop_free()
431 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_pop_free()
440 struct bpf_common_lru *clru = &lru->common_lru; in bpf_common_lru_pop_free()
446 loc_l = per_cpu_ptr(clru->local_list, cpu); in bpf_common_lru_pop_free()
448 raw_spin_lock_irqsave(&loc_l->lock, flags); in bpf_common_lru_pop_free()
459 raw_spin_unlock_irqrestore(&loc_l->lock, flags); in bpf_common_lru_pop_free()
468 * current CPU and remote CPU in RR. It starts in bpf_common_lru_pop_free()
469 * with the loc_l->next_steal CPU. in bpf_common_lru_pop_free()
472 first_steal = loc_l->next_steal; in bpf_common_lru_pop_free()
475 steal_loc_l = per_cpu_ptr(clru->local_list, steal); in bpf_common_lru_pop_free()
477 raw_spin_lock_irqsave(&steal_loc_l->lock, flags); in bpf_common_lru_pop_free()
483 raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags); in bpf_common_lru_pop_free()
488 loc_l->next_steal = steal; in bpf_common_lru_pop_free()
491 raw_spin_lock_irqsave(&loc_l->lock, flags); in bpf_common_lru_pop_free()
493 raw_spin_unlock_irqrestore(&loc_l->lock, flags); in bpf_common_lru_pop_free()
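The comment at file lines 468-469 covers the fallback used when the local free list cannot be refilled: scan the CPUs' local lists in round-robin order, starting from loc_l->next_steal and remembering where the scan stopped so the next attempt resumes there. The following sketch shows only that round-robin bookkeeping; local_free, next_cpu and steal_one are made-up names, and the real code additionally tries each victim's pending list (which requires a successful del_from_htab()) and takes the per-list locks.

#include <stdio.h>

#define NR_CPUS 4

/* toy per-CPU local free lists: just a count of free nodes on each CPU */
static unsigned int local_free[NR_CPUS] = { 0, 0, 2, 0 };

/* wrap-around successor, the role get_next_cpu() plays in the real code */
static int next_cpu(int cpu)
{
    return (cpu + 1) % NR_CPUS;
}

/*
 * Steal one free node, scanning the CPUs in round-robin order starting at
 * *next_steal and remembering where the scan stopped for the next attempt.
 * Returns the CPU a node was taken from, or -1 if every list is empty.
 */
static int steal_one(int *next_steal)
{
    int first = *next_steal;
    int cpu = first;
    int victim = -1;

    do {
        if (local_free[cpu]) {
            local_free[cpu]--;          /* take one node from this CPU */
            victim = cpu;
        }
        cpu = next_cpu(cpu);
    } while (victim < 0 && cpu != first);

    *next_steal = cpu;                  /* resume the search here next time */
    return victim;
}

int main(void)
{
    int next_steal = 1;

    for (int i = 0; i < 2; i++) {
        int victim = steal_one(&next_steal);

        printf("stole from cpu %d, next_steal=%d\n", victim, next_steal);
    }
    return 0;
}

Both runs of the loop take a node from CPU 2, and next_steal is left pointing just past the victim, so successive steals spread across CPUs instead of always starting the search from CPU 0.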
501 if (lru->percpu) in bpf_lru_pop_free()
510 u8 node_type = READ_ONCE(node->type); in bpf_common_lru_push_free()
520 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); in bpf_common_lru_push_free()
522 raw_spin_lock_irqsave(&loc_l->lock, flags); in bpf_common_lru_push_free()
524 if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) { in bpf_common_lru_push_free()
525 raw_spin_unlock_irqrestore(&loc_l->lock, flags); in bpf_common_lru_push_free()
529 node->type = BPF_LRU_LOCAL_LIST_T_FREE; in bpf_common_lru_push_free()
531 list_move(&node->list, local_free_list(loc_l)); in bpf_common_lru_push_free()
533 raw_spin_unlock_irqrestore(&loc_l->lock, flags); in bpf_common_lru_push_free()
538 bpf_lru_list_push_free(&lru->common_lru.lru_list, node); in bpf_common_lru_push_free()
547 l = per_cpu_ptr(lru->percpu_lru, node->cpu); in bpf_percpu_lru_push_free()
549 raw_spin_lock_irqsave(&l->lock, flags); in bpf_percpu_lru_push_free()
553 raw_spin_unlock_irqrestore(&l->lock, flags); in bpf_percpu_lru_push_free()
558 if (lru->percpu) in bpf_lru_push_free()
568 struct bpf_lru_list *l = &lru->common_lru.lru_list; in bpf_common_lru_populate()
575 node->type = BPF_LRU_LIST_T_FREE; in bpf_common_lru_populate()
577 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_common_lru_populate()
597 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_percpu_lru_populate()
600 node->cpu = cpu; in bpf_percpu_lru_populate()
601 node->type = BPF_LRU_LIST_T_FREE; in bpf_percpu_lru_populate()
603 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); in bpf_percpu_lru_populate()
616 if (lru->percpu) in bpf_lru_populate()
629 INIT_LIST_HEAD(&loc_l->lists[i]); in bpf_lru_locallist_init()
631 loc_l->next_steal = cpu; in bpf_lru_locallist_init()
633 raw_spin_lock_init(&loc_l->lock); in bpf_lru_locallist_init()
641 INIT_LIST_HEAD(&l->lists[i]); in bpf_lru_list_init()
644 l->counts[i] = 0; in bpf_lru_list_init()
646 l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE]; in bpf_lru_list_init()
648 raw_spin_lock_init(&l->lock); in bpf_lru_list_init()
657 lru->percpu_lru = alloc_percpu(struct bpf_lru_list); in bpf_lru_init()
658 if (!lru->percpu_lru) in bpf_lru_init()
659 return -ENOMEM; in bpf_lru_init()
664 l = per_cpu_ptr(lru->percpu_lru, cpu); in bpf_lru_init()
667 lru->nr_scans = PERCPU_NR_SCANS; in bpf_lru_init()
669 struct bpf_common_lru *clru = &lru->common_lru; in bpf_lru_init()
671 clru->local_list = alloc_percpu(struct bpf_lru_locallist); in bpf_lru_init()
672 if (!clru->local_list) in bpf_lru_init()
673 return -ENOMEM; in bpf_lru_init()
678 loc_l = per_cpu_ptr(clru->local_list, cpu); in bpf_lru_init()
682 bpf_lru_list_init(&clru->lru_list); in bpf_lru_init()
683 lru->nr_scans = LOCAL_NR_SCANS; in bpf_lru_init()
686 lru->percpu = percpu; in bpf_lru_init()
687 lru->del_from_htab = del_from_htab; in bpf_lru_init()
688 lru->del_arg = del_arg; in bpf_lru_init()
689 lru->hash_offset = hash_offset; in bpf_lru_init()
696 if (lru->percpu) in bpf_lru_destroy()
697 free_percpu(lru->percpu_lru); in bpf_lru_destroy()
699 free_percpu(lru->common_lru.local_list); in bpf_lru_destroy()