Lines matching the identifier rb (full matches) in the BPF ring buffer implementation, kernel/bpf/ringbuf.c. Each entry gives the source line number, the matching line, and the enclosing function.
80 struct bpf_ringbuf *rb; member
97 struct bpf_ringbuf *rb; in bpf_ringbuf_area_alloc() local
134 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
136 if (rb) { in bpf_ringbuf_area_alloc()
138 rb->pages = pages; in bpf_ringbuf_area_alloc()
139 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
140 return rb; in bpf_ringbuf_area_alloc()
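The bpf_ringbuf_area_alloc() fragments above pass nr_meta_pages + 2 * nr_data_pages page pointers to vmap(): the data pages are mapped twice, back to back, so a record that wraps past the end of the data area stays virtually contiguous. The standalone program below is only a userspace analogue of that trick (not the kernel code); it maps one memfd page twice and shows a write running off the end of the first view reappearing at the start of the buffer.

/* Userspace analogue of the double mapping: the same file page appears
 * twice in the address space, so a "wrapping" write stays contiguous. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 4096;                      /* one data page for the demo */
	int fd = memfd_create("ring", 0);
	void *area, *lo, *hi;

	if (fd < 0 || ftruncate(fd, size))
		return 1;

	/* reserve 2 * size of address space, then map the fd into both halves */
	area = mmap(NULL, 2 * size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return 1;
	lo = mmap(area, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
	hi = mmap((char *)area + size, size, PROT_READ | PROT_WRITE,
		  MAP_SHARED | MAP_FIXED, fd, 0);
	if (lo == MAP_FAILED || hi == MAP_FAILED)
		return 1;

	/* a write that runs past the end of the first view lands at the start */
	strcpy((char *)lo + size - 4, "wrapped");
	printf("start of buffer now holds \"%s\"\n", (char *)lo);   /* "ped" */
	return 0;
}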
152 struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work); in bpf_ringbuf_notify() local
154 wake_up_all(&rb->waitq); in bpf_ringbuf_notify()
170 struct bpf_ringbuf *rb; in bpf_ringbuf_alloc() local
172 rb = bpf_ringbuf_area_alloc(data_sz, numa_node); in bpf_ringbuf_alloc()
173 if (!rb) in bpf_ringbuf_alloc()
176 raw_spin_lock_init(&rb->spinlock); in bpf_ringbuf_alloc()
177 atomic_set(&rb->busy, 0); in bpf_ringbuf_alloc()
178 init_waitqueue_head(&rb->waitq); in bpf_ringbuf_alloc()
179 init_irq_work(&rb->work, bpf_ringbuf_notify); in bpf_ringbuf_alloc()
181 rb->mask = data_sz - 1; in bpf_ringbuf_alloc()
182 rb->consumer_pos = 0; in bpf_ringbuf_alloc()
183 rb->producer_pos = 0; in bpf_ringbuf_alloc()
184 rb->pending_pos = 0; in bpf_ringbuf_alloc()
186 return rb; in bpf_ringbuf_alloc()
207 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
208 if (!rb_map->rb) { in ringbuf_map_alloc()
216 static void bpf_ringbuf_free(struct bpf_ringbuf *rb) in bpf_ringbuf_free() argument
219 * to unmap rb itself with vunmap() below in bpf_ringbuf_free()
221 struct page **pages = rb->pages; in bpf_ringbuf_free()
222 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free()
224 vunmap(rb); in bpf_ringbuf_free()
235 bpf_ringbuf_free(rb_map->rb); in ringbuf_map_free()
275 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap_kern()
296 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_user()
299 static unsigned long ringbuf_avail_data_sz(struct bpf_ringbuf *rb) in ringbuf_avail_data_sz() argument
303 cons_pos = smp_load_acquire(&rb->consumer_pos); in ringbuf_avail_data_sz()
304 prod_pos = smp_load_acquire(&rb->producer_pos); in ringbuf_avail_data_sz()
308 static u32 ringbuf_total_data_sz(const struct bpf_ringbuf *rb) in ringbuf_total_data_sz() argument
310 return rb->mask + 1; in ringbuf_total_data_sz()
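ringbuf_avail_data_sz() and ringbuf_total_data_sz() above imply the bookkeeping model: consumer_pos and producer_pos are monotonically increasing byte counters, the data area size is a power of two with mask = data_sz - 1, and physical offsets are derived with pos & mask. A tiny self-contained model of that arithmetic (type and function names here are mine, not the kernel's):

#include <assert.h>
#include <stdint.h>

struct ring_model {
	uint64_t consumer_pos;      /* bytes consumed so far, never wraps */
	uint64_t producer_pos;      /* bytes produced so far, never wraps */
	uint64_t mask;              /* data_sz - 1, data_sz a power of two */
};

static uint64_t avail_data_sz(const struct ring_model *r)
{
	return r->producer_pos - r->consumer_pos;   /* bytes not yet consumed */
}

static uint64_t total_data_sz(const struct ring_model *r)
{
	return r->mask + 1;                         /* size of the data area */
}

int main(void)
{
	struct ring_model r = { .consumer_pos = 4096, .producer_pos = 4160,
				.mask = 4095 };

	assert(avail_data_sz(&r) == 64);
	assert(total_data_sz(&r) == 4096);
	assert((r.producer_pos & r.mask) == 64);    /* offset inside the data area */
	return 0;
}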
319 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_kern()
321 if (ringbuf_avail_data_sz(rb_map->rb)) in ringbuf_map_poll_kern()
332 poll_wait(filp, &rb_map->rb->waitq, pts); in ringbuf_map_poll_user()
334 if (ringbuf_avail_data_sz(rb_map->rb) < ringbuf_total_data_sz(rb_map->rb)) in ringbuf_map_poll_user()
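The poll fragments above wait on rb->waitq: a kernel-producer ring buffer reports readable whenever ringbuf_avail_data_sz() is non-zero, while a user-producer ring reports writable as long as it is not completely full. Userspace rarely drives that machinery by hand; a typical consumer goes through libbpf's ring buffer API, roughly as sketched below (the map fd is assumed to come from an already loaded BPF object, error handling is trimmed):

#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_sample(void *ctx, void *data, size_t size)
{
	printf("got %zu-byte sample\n", size);
	return 0;   /* a negative return stops consumption and is propagated */
}

int consume_loop(int ringbuf_map_fd)
{
	struct ring_buffer *rb;

	rb = ring_buffer__new(ringbuf_map_fd, handle_sample, NULL, NULL);
	if (!rb)
		return -1;

	/* epoll-waits on the map fd; wakeups come from the irq_work/waitq
	 * path listed above (bpf_ringbuf_notify -> wake_up_all) */
	while (ring_buffer__poll(rb, 100 /* ms */) >= 0)
		;

	ring_buffer__free(rb);
	return 0;
}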
341 struct bpf_ringbuf *rb; in ringbuf_map_mem_usage() local
346 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in ringbuf_map_mem_usage()
347 usage += (u64)rb->nr_pages << PAGE_SHIFT; in ringbuf_map_mem_usage()
390 static size_t bpf_ringbuf_rec_pg_off(struct bpf_ringbuf *rb, in bpf_ringbuf_rec_pg_off() argument
393 return ((void *)hdr - (void *)rb) >> PAGE_SHIFT; in bpf_ringbuf_rec_pg_off()
408 static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size) in __bpf_ringbuf_reserve() argument
418 if (len > ringbuf_total_data_sz(rb)) in __bpf_ringbuf_reserve()
421 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_ringbuf_reserve()
424 if (!raw_spin_trylock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
427 raw_spin_lock_irqsave(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
430 pend_pos = rb->pending_pos; in __bpf_ringbuf_reserve()
431 prod_pos = rb->producer_pos; in __bpf_ringbuf_reserve()
435 hdr = (void *)rb->data + (pend_pos & rb->mask); in __bpf_ringbuf_reserve()
443 rb->pending_pos = pend_pos; in __bpf_ringbuf_reserve()
451 if (new_prod_pos - cons_pos > rb->mask || in __bpf_ringbuf_reserve()
452 new_prod_pos - pend_pos > rb->mask) { in __bpf_ringbuf_reserve()
453 raw_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
457 hdr = (void *)rb->data + (prod_pos & rb->mask); in __bpf_ringbuf_reserve()
458 pg_off = bpf_ringbuf_rec_pg_off(rb, hdr); in __bpf_ringbuf_reserve()
463 smp_store_release(&rb->producer_pos, new_prod_pos); in __bpf_ringbuf_reserve()
465 raw_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
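The __bpf_ringbuf_reserve() fragments above show the core of the reservation path: the requested size is rounded up and prefixed with a header, a trylock is used when reserving from NMI context, and the reservation fails when the new producer position would run more than mask bytes ahead of the consumer position or of the oldest still-uncommitted record (pending_pos). The model below is a deliberately simplified single-producer version of that logic (no spinlock, no NMI handling, no pending_pos, no double mapping), just to make the position checks concrete:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_SZ 8u   /* stand-in for BPF_RINGBUF_HDR_SZ */

struct ring_model {
	uint64_t consumer_pos, producer_pos, mask;
	uint8_t data[4096];
};

static void *reserve(struct ring_model *r, uint32_t size)
{
	uint64_t len = ((uint64_t)size + HDR_SZ + 7) & ~7ull;  /* 8-byte aligned */
	uint64_t new_prod = r->producer_pos + len;

	if (len > r->mask + 1)                      /* larger than the whole ring */
		return NULL;
	if (new_prod - r->consumer_pos > r->mask)   /* would overwrite unread data */
		return NULL;

	void *rec = &r->data[r->producer_pos & r->mask];
	r->producer_pos = new_prod;         /* the kernel uses smp_store_release() */
	return (uint8_t *)rec + HDR_SZ;     /* caller writes the payload after hdr */
}

int main(void)
{
	struct ring_model r = { .mask = sizeof(r.data) - 1 };
	char *p = reserve(&r, 16);

	if (p)
		memcpy(p, "hello ringbuf", 14);
	printf("producer_pos is now %llu\n", (unsigned long long)r.producer_pos);
	return 0;
}

Unlike the kernel, this model does not handle a record that wraps around the end of data[]; in the real implementation the double mapping set up in bpf_ringbuf_area_alloc() makes that case transparent.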
478 return (unsigned long)__bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_3()
493 struct bpf_ringbuf *rb; in bpf_ringbuf_commit() local
497 rb = bpf_ringbuf_restore_from_rec(hdr); in bpf_ringbuf_commit()
508 rec_pos = (void *)hdr - (void *)rb->data; in bpf_ringbuf_commit()
509 cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask; in bpf_ringbuf_commit()
512 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
514 irq_work_queue(&rb->work); in bpf_ringbuf_commit()
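bpf_ringbuf_commit() above recovers struct bpf_ringbuf from the record header (via the page offset stored at reservation time, cf. bpf_ringbuf_rec_pg_off()), publishes the record, and conditionally queues irq_work to wake the consumer. The sketch below models only the header-publication step; the bit values mirror the UAPI BPF_RINGBUF_BUSY_BIT / BPF_RINGBUF_DISCARD_BIT constants, but the struct layout and function are illustrative, not the kernel's code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RINGBUF_BUSY_BIT    (1u << 31)   /* producer still writing the record */
#define RINGBUF_DISCARD_BIT (1u << 30)   /* record dropped, consumer skips it */

struct rec_hdr {
	_Atomic uint32_t len;   /* payload length plus the status bits above */
	uint32_t pg_off;        /* lets the kernel map the header back to its rb */
};

static void commit(struct rec_hdr *hdr, uint32_t payload_len, bool discard)
{
	uint32_t new_len = payload_len | (discard ? RINGBUF_DISCARD_BIT : 0);

	/* the kernel uses xchg() here, so clearing the busy bit has release
	 * semantics; only after this may the consumer read the record */
	atomic_exchange_explicit(&hdr->len, new_len, memory_order_release);

	/* this is also where the kernel decides whether to irq_work_queue()
	 * rb->work and wake the consumer, depending on the wakeup flags and
	 * the consumer position (lines 508-514 above) */
}

int main(void)
{
	struct rec_hdr hdr = { .len = 24 | RINGBUF_BUSY_BIT, .pg_off = 0 };

	commit(&hdr, 24, false);            /* equivalent of a submit, not a discard */
	return atomic_load(&hdr.len) == 24 ? 0 : 1;
}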
553 rec = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
573 struct bpf_ringbuf *rb; in BPF_CALL_2() local
575 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_2()
579 return ringbuf_avail_data_sz(rb); in BPF_CALL_2()
581 return ringbuf_total_data_sz(rb); in BPF_CALL_2()
583 return smp_load_acquire(&rb->consumer_pos); in BPF_CALL_2()
585 return smp_load_acquire(&rb->producer_pos); in BPF_CALL_2()
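The four return values in the query helper above correspond to the BPF_RB_* flags a BPF program can pass to bpf_ringbuf_query(). A hedged BPF-side sketch tying that helper to the reserve/commit paths already listed (the map name, sizes, and attach point are illustrative, not from the source):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);    /* data size, must be a power of two */
} events SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int probe(void *ctx)
{
	__u64 avail = bpf_ringbuf_query(&events, BPF_RB_AVAIL_DATA);
	__u64 size  = bpf_ringbuf_query(&events, BPF_RB_RING_SIZE);
	__u64 *slot;

	/* back off when the ring is more than half full */
	if (avail * 2 > size)
		return 0;

	slot = bpf_ringbuf_reserve(&events, sizeof(*slot), 0);
	if (!slot)
		return 0;
	*slot = avail;
	bpf_ringbuf_submit(slot, 0);        /* or bpf_ringbuf_discard(slot, 0) */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";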
618 sample = __bpf_ringbuf_reserve(rb_map->rb, size); in BPF_CALL_4()
676 static int __bpf_user_ringbuf_peek(struct bpf_ringbuf *rb, void **sample, u32 *size) in __bpf_user_ringbuf_peek() argument
683 prod_pos = smp_load_acquire(&rb->producer_pos); in __bpf_user_ringbuf_peek()
688 cons_pos = smp_load_acquire(&rb->consumer_pos); in __bpf_user_ringbuf_peek()
692 hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask)); in __bpf_user_ringbuf_peek()
704 if (total_len > ringbuf_total_data_sz(rb)) in __bpf_user_ringbuf_peek()
718 smp_store_release(&rb->consumer_pos, cons_pos + total_len); in __bpf_user_ringbuf_peek()
725 *sample = (void *)((uintptr_t)rb->data + in __bpf_user_ringbuf_peek()
726 (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask)); in __bpf_user_ringbuf_peek()
731 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags) in __bpf_user_ringbuf_sample_release() argument
740 consumer_pos = rb->consumer_pos; in __bpf_user_ringbuf_sample_release()
742 smp_store_release(&rb->consumer_pos, consumer_pos + rounded_size); in __bpf_user_ringbuf_sample_release()
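__bpf_user_ringbuf_peek() and __bpf_user_ringbuf_sample_release() above are the kernel-side consumer of a BPF_MAP_TYPE_USER_RINGBUF map, where userspace is the producer. The usual producer goes through libbpf's user_ring_buffer API; a hedged sketch (the map fd is assumed to come from an already loaded object, error handling is trimmed):

#include <bpf/libbpf.h>
#include <string.h>

int produce_one(int user_ringbuf_map_fd)
{
	struct user_ring_buffer *urb;
	void *sample;

	urb = user_ring_buffer__new(user_ringbuf_map_fd, NULL);
	if (!urb)
		return -1;

	/* the reservation writes the length header that the kernel-side peek
	 * above parses; its busy bit stays set until submit or discard */
	sample = user_ring_buffer__reserve(urb, 64);
	if (!sample) {
		user_ring_buffer__free(urb);
		return -1;
	}

	memcpy(sample, "hello from userspace", 21);
	user_ring_buffer__submit(urb, sample);   /* or user_ring_buffer__discard() */

	user_ring_buffer__free(urb);
	return 0;
}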
748 struct bpf_ringbuf *rb; in BPF_CALL_4() local
757 rb = container_of(map, struct bpf_ringbuf_map, map)->rb; in BPF_CALL_4()
760 if (!atomic_try_cmpxchg(&rb->busy, &busy, 1)) in BPF_CALL_4()
769 err = __bpf_user_ringbuf_peek(rb, &sample, &size); in BPF_CALL_4()
784 __bpf_user_ringbuf_sample_release(rb, size, flags); in BPF_CALL_4()
790 * storing of any rb consumer or producer positions. in BPF_CALL_4()
792 atomic_set_release(&rb->busy, 0); in BPF_CALL_4()
795 irq_work_queue(&rb->work); in BPF_CALL_4()
797 irq_work_queue(&rb->work); in BPF_CALL_4()
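The final group of fragments is bpf_user_ringbuf_drain(): the helper takes the rb->busy flag, repeatedly peeks and releases userspace-produced samples, then drops busy and optionally queues irq_work so that a producer blocked waiting for free space is woken. A hedged BPF-side sketch of a draining program (map name, attach point, and callback are illustrative; I am assuming the helper is available to tracepoint programs, which may depend on kernel version):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_USER_RINGBUF);
	__uint(max_entries, 256 * 1024);
} user_events SEC(".maps");

static long handle_msg(struct bpf_dynptr *dynptr, void *ctx)
{
	__u64 *count = ctx;

	(*count)++;         /* a real callback would bpf_dynptr_read() the sample */
	return 0;           /* a non-zero return stops the drain early */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int drain_prog(void *ctx)
{
	__u64 count = 0;

	/* returns the number of drained samples or a negative error;
	 * BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP control the irq_work wakeups
	 * seen at the end of the listing */
	bpf_user_ringbuf_drain(&user_events, handle_msg, &count, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";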