Lines matching full:qs (identifier search over kernel/bpf/queue_stack_maps.c, the BPF queue/stack map implementation)
30 static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs) in queue_stack_map_is_empty() argument
32 return qs->head == qs->tail; in queue_stack_map_is_empty()
35 static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) in queue_stack_map_is_full() argument
37 u32 head = qs->head + 1; in queue_stack_map_is_full()
39 if (unlikely(head >= qs->size)) in queue_stack_map_is_full()
42 return head == qs->tail; in queue_stack_map_is_full()
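The two predicates above treat the element array as a circular buffer: the map is empty when head and tail coincide, and full when advancing head by one slot (with wrap-around at qs->size) would land on tail, so one slot always stays unused to tell the two states apart. A minimal userspace C model of the same checks; the struct below only mirrors the fields visible in this listing and is not the kernel's struct bpf_queue_stack:

    #include <stdbool.h>
    #include <stdint.h>

    struct qs_model {
            uint32_t head;          /* next slot to be written        */
            uint32_t tail;          /* oldest slot still holding data */
            uint32_t size;          /* number of slots in the ring    */
            uint32_t value_size;    /* bytes per element              */
            char elements[];        /* size * value_size bytes        */
    };

    static bool qs_model_is_empty(const struct qs_model *qs)
    {
            return qs->head == qs->tail;
    }

    static bool qs_model_is_full(const struct qs_model *qs)
    {
            uint32_t head = qs->head + 1;

            if (head >= qs->size)   /* wrap around; the kernel resets head to 0 here as well */
                    head = 0;
            return head == qs->tail;
    }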
67 struct bpf_queue_stack *qs; in queue_stack_map_alloc() local
71 queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
73 qs = bpf_map_area_alloc(queue_size, numa_node); in queue_stack_map_alloc()
74 if (!qs) in queue_stack_map_alloc()
77 bpf_map_init_from_attr(&qs->map, attr); in queue_stack_map_alloc()
79 qs->size = size; in queue_stack_map_alloc()
81 raw_spin_lock_init(&qs->lock); in queue_stack_map_alloc()
83 return &qs->map; in queue_stack_map_alloc()
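queue_stack_map_alloc() sizes one contiguous allocation: the bpf_queue_stack header followed by size * attr->value_size bytes of element storage, obtained with bpf_map_area_alloc() on the requested NUMA node, after which the generic map fields and the raw spinlock guarding head/tail are initialized. Continuing the qs_model sketch, a rough userspace analogue (calloc stands in for bpf_map_area_alloc; the kernel sizes the ring to max_entries + 1 so the spare slot for the full check is available):

    #include <stdlib.h>

    static struct qs_model *qs_model_alloc(uint32_t max_entries, uint32_t value_size)
    {
            uint64_t size = (uint64_t)max_entries + 1;  /* one spare slot for the full check */
            uint64_t bytes = sizeof(struct qs_model) + size * value_size;
            struct qs_model *qs = calloc(1, bytes);     /* head == tail == 0: starts empty */

            if (!qs)
                    return NULL;
            qs->size = (uint32_t)size;
            qs->value_size = value_size;
            return qs;
    }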
89 struct bpf_queue_stack *qs = bpf_queue_stack(map); in queue_stack_map_free() local
91 bpf_map_area_free(qs); in queue_stack_map_free()
96 struct bpf_queue_stack *qs = bpf_queue_stack(map); in __queue_map_get() local
102 if (!raw_spin_trylock_irqsave(&qs->lock, flags)) in __queue_map_get()
105 raw_spin_lock_irqsave(&qs->lock, flags); in __queue_map_get()
108 if (queue_stack_map_is_empty(qs)) { in __queue_map_get()
109 memset(value, 0, qs->map.value_size); in __queue_map_get()
114 ptr = &qs->elements[qs->tail * qs->map.value_size]; in __queue_map_get()
115 memcpy(value, ptr, qs->map.value_size); in __queue_map_get()
118 if (unlikely(++qs->tail >= qs->size)) in __queue_map_get()
119 qs->tail = 0; in __queue_map_get()
123 raw_spin_unlock_irqrestore(&qs->lock, flags); in __queue_map_get()
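__queue_map_get() is the FIFO read path: under the per-map raw spinlock (the trylock on line 102 is the variant used where the kernel must not spin waiting for the lock), it either zeroes the output buffer and reports the map empty, or copies the oldest element out of the tail slot and, for a pop rather than a peek, advances tail with wrap-around. Continuing the sketch, without the locking:

    #include <string.h>

    /* FIFO pop/peek modelled on __queue_map_get(); locking omitted. */
    static int qs_model_queue_get(struct qs_model *qs, void *value, bool delete)
    {
            if (qs_model_is_empty(qs)) {
                    memset(value, 0, qs->value_size);
                    return -1;                        /* kernel returns -ENOENT */
            }
            memcpy(value, &qs->elements[(size_t)qs->tail * qs->value_size],
                   qs->value_size);
            if (delete) {                             /* pop rather than peek */
                    if (++qs->tail >= qs->size)
                            qs->tail = 0;
            }
            return 0;
    }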
130 struct bpf_queue_stack *qs = bpf_queue_stack(map); in __stack_map_get() local
137 if (!raw_spin_trylock_irqsave(&qs->lock, flags)) in __stack_map_get()
140 raw_spin_lock_irqsave(&qs->lock, flags); in __stack_map_get()
143 if (queue_stack_map_is_empty(qs)) { in __stack_map_get()
144 memset(value, 0, qs->map.value_size); in __stack_map_get()
149 index = qs->head - 1; in __stack_map_get()
150 if (unlikely(index >= qs->size)) in __stack_map_get()
151 index = qs->size - 1; in __stack_map_get()
153 ptr = &qs->elements[index * qs->map.value_size]; in __stack_map_get()
154 memcpy(value, ptr, qs->map.value_size); in __stack_map_get()
157 qs->head = index; in __stack_map_get()
160 raw_spin_unlock_irqrestore(&qs->lock, flags); in __stack_map_get()
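__stack_map_get() is the LIFO counterpart: the newest element sits one slot below head, so the index is head - 1 with a wrap back to size - 1 when head is 0; deleting the element is just a matter of moving head down onto that slot, the stored bytes are never erased. Continuing the sketch:

    /* LIFO pop/peek modelled on __stack_map_get(); locking omitted. */
    static int qs_model_stack_get(struct qs_model *qs, void *value, bool delete)
    {
            uint32_t index;

            if (qs_model_is_empty(qs)) {
                    memset(value, 0, qs->value_size);
                    return -1;                        /* kernel returns -ENOENT */
            }
            index = qs->head - 1;
            if (index >= qs->size)                    /* head was 0: wrap to the last slot */
                    index = qs->size - 1;
            memcpy(value, &qs->elements[(size_t)index * qs->value_size],
                   qs->value_size);
            if (delete)
                    qs->head = index;                 /* pop: head now points at the freed slot */
            return 0;
    }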
192 struct bpf_queue_stack *qs = bpf_queue_stack(map); in queue_stack_map_push_elem() local
207 if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags)) in queue_stack_map_push_elem()
210 raw_spin_lock_irqsave(&qs->lock, irq_flags); in queue_stack_map_push_elem()
213 if (queue_stack_map_is_full(qs)) { in queue_stack_map_push_elem()
219 if (unlikely(++qs->tail >= qs->size)) in queue_stack_map_push_elem()
220 qs->tail = 0; in queue_stack_map_push_elem()
223 dst = &qs->elements[qs->head * qs->map.value_size]; in queue_stack_map_push_elem()
224 memcpy(dst, value, qs->map.value_size); in queue_stack_map_push_elem()
226 if (unlikely(++qs->head >= qs->size)) in queue_stack_map_push_elem()
227 qs->head = 0; in queue_stack_map_push_elem()
230 raw_spin_unlock_irqrestore(&qs->lock, irq_flags); in queue_stack_map_push_elem()
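queue_stack_map_push_elem() writes the new value into the head slot and then advances head. When the ring is already full it either fails for a plain push or, when the caller asked to replace (the kernel selects this with the BPF_EXIST flag), first advances tail so the oldest element is dropped and overwritten. Continuing the sketch:

    /* Push modelled on queue_stack_map_push_elem(); locking omitted. */
    static int qs_model_push(struct qs_model *qs, const void *value, bool replace)
    {
            if (qs_model_is_full(qs)) {
                    if (!replace)
                            return -1;                /* kernel returns -E2BIG */
                    if (++qs->tail >= qs->size)       /* drop the oldest element */
                            qs->tail = 0;
            }
            memcpy(&qs->elements[(size_t)qs->head * qs->value_size], value,
                   qs->value_size);
            if (++qs->head >= qs->size)
                    qs->head = 0;
            return 0;
    }

In the kernel, these operations back the bpf_map_push_elem(), bpf_map_pop_elem() and bpf_map_peek_elem() helpers for BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK maps, and are reached from user space via the bpf(2) map update and lookup_and_delete commands.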