Lines matching full:array in kernel/bpf/arraymap.c (the BPF array map implementation). Each entry shows the source line number, the matching code, and the enclosing function.

22 static void bpf_array_free_percpu(struct bpf_array *array)  in bpf_array_free_percpu()  argument
26 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
27 free_percpu(array->pptrs[i]); in bpf_array_free_percpu()
32 static int bpf_array_alloc_percpu(struct bpf_array *array) in bpf_array_alloc_percpu() argument
37 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
41 bpf_array_free_percpu(array); in bpf_array_alloc_percpu()
44 array->pptrs[i] = ptr; in bpf_array_alloc_percpu()
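bpf_array_alloc_percpu() is all-or-nothing: it allocates one per-CPU region per array slot and, on the first failure, unwinds every slot already filled via bpf_array_free_percpu() (free_percpu(NULL) is a no-op, so a zero-initialized, partially filled pptrs[] is safe to hand to the free path). A minimal user-space sketch of the same unwind pattern, with calloc/free standing in for the kernel's per-CPU allocator and a hypothetical toy_array standing in for struct bpf_array:

#include <stdlib.h>

/* Hypothetical stand-in for struct bpf_array: max_entries slots, each
 * holding one allocation (a per-CPU region in the kernel). Must be
 * zero-initialized, as the kernel's map area is. */
struct toy_array {
        unsigned int max_entries;
        size_t elem_size;
        void *pptrs[64];
};

static void toy_free_all(struct toy_array *array)
{
        for (unsigned int i = 0; i < array->max_entries; i++)
                free(array->pptrs[i]);  /* free(NULL) is a no-op, like free_percpu(NULL) */
}

static int toy_alloc_all(struct toy_array *array)
{
        for (unsigned int i = 0; i < array->max_entries; i++) {
                void *ptr = calloc(1, array->elem_size);

                if (!ptr) {
                        toy_free_all(array);    /* unwind every earlier slot */
                        return -1;              /* the kernel returns -ENOMEM */
                }
                array->pptrs[i] = ptr;
        }
        return 0;
}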
90 struct bpf_array *array; in array_map_alloc() local
106 /* round up array size to nearest power of 2, in array_map_alloc()
115 array_size = sizeof(*array); in array_map_alloc()
120 * ensure array->value is exactly page-aligned in array_map_alloc()
138 array = data + PAGE_ALIGN(sizeof(struct bpf_array)) in array_map_alloc()
141 array = bpf_map_area_alloc(array_size, numa_node); in array_map_alloc()
143 if (!array) in array_map_alloc()
145 array->index_mask = index_mask; in array_map_alloc()
146 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
149 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
150 array->elem_size = elem_size; in array_map_alloc()
152 if (percpu && bpf_array_alloc_percpu(array)) { in array_map_alloc()
153 bpf_map_area_free(array); in array_map_alloc()
157 return &array->map; in array_map_alloc()
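array_map_alloc() rounds max_entries up to the next power of two and derives index_mask from it; ANDing a possibly mispredicted index with the mask keeps even speculative loads inside the allocation, which is the Spectre v1 mitigation that bypass_spec_v1 can disable. A runnable sketch of the mask computation, with an open-coded loop standing in for the kernel's fls_long():

#include <stdint.h>
#include <stdio.h>

/* Compute the index mask the way array_map_alloc() does: round
 * max_entries up to a power of two, subtract one. */
static uint32_t index_mask_for(uint32_t max_entries)
{
        unsigned long v = max_entries - 1;
        unsigned int fls = 0;   /* position of the highest set bit */

        while (v) {
                fls++;
                v >>= 1;
        }
        return (uint32_t)((1ULL << fls) - 1);
}

int main(void)
{
        printf("max_entries %u -> index_mask %#x\n", 1000u, index_mask_for(1000u)); /* 0x3ff */
        printf("max_entries %u -> index_mask %#x\n", 1024u, index_mask_for(1024u)); /* 0x3ff */
        return 0;
}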
160 static void *array_map_elem_ptr(struct bpf_array* array, u32 index) in array_map_elem_ptr() argument
162 return array->value + (u64)array->elem_size * index; in array_map_elem_ptr()
168 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem() local
171 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
174 return array->value + (u64)array->elem_size * (index & array->index_mask); in array_map_lookup_elem()
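array_map_elem_ptr() is the trusted in-kernel pointer math, while array_map_lookup_elem() both bounds-checks the index and masks it before scaling by elem_size. A user-space sketch of the same lookup, assuming (as in struct bpf_array) that the values live inline after the header:

#include <stdint.h>

/* Layout assumption: values stored inline after the header, as in
 * struct bpf_array's value[]. */
struct toy_array {
        uint32_t max_entries;
        uint32_t elem_size;     /* round_up(value_size, 8) in the kernel */
        uint32_t index_mask;    /* (max_entries rounded to a power of 2) - 1 */
        char value[];
};

static void *toy_lookup(struct toy_array *array, uint32_t index)
{
        if (index >= array->max_entries)
                return NULL;
        /* The AND is architecturally redundant after the check above, but
         * it bounds the access even when the branch is speculated past
         * (Spectre v1), which is why index_mask exists at all. */
        return array->value + (uint64_t)array->elem_size * (index & array->index_mask);
}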
180 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_addr() local
187 *imm = (unsigned long)array->value; in array_map_direct_value_addr()
194 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_meta() local
195 u64 base = (unsigned long)array->value; in array_map_direct_value_meta()
196 u64 range = array->elem_size; in array_map_direct_value_meta()
210 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup() local
212 u32 elem_size = array->elem_size; in array_map_gen_lookup()
224 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_map_gen_lookup()
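array_map_gen_lookup() lets the verifier inline the map lookup into the program as a short BPF instruction sequence instead of a helper call. A plain-C rendering of what that emitted sequence computes (the emitter builds BPF_LDX_MEM/BPF_JMP_IMM/BPF_ALU* instructions; this is only the equivalent logic):

#include <stdint.h>

static void *inlined_lookup(char *value_base, uint32_t max_entries,
                            uint32_t index_mask, uint32_t elem_size,
                            const uint32_t *key)
{
        uint32_t index = *key;          /* BPF_LDX_MEM(BPF_W, ...): load the u32 key */

        if (index >= max_entries)       /* BPF_JMP_IMM(BPF_JGE, ...): bounds check */
                return NULL;
        index &= index_mask;            /* BPF_ALU32_IMM(BPF_AND, ...): Spectre mask */
        /* scale: BPF_ALU64_IMM shift when elem_size is a power of two, multiply otherwise */
        return value_base + (uint64_t)index * elem_size;
}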
243 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem() local
246 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
249 return this_cpu_ptr(array->pptrs[index & array->index_mask]); in percpu_array_map_lookup_elem()
255 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_gen_lookup() local
270 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask); in percpu_array_map_gen_lookup()
286 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_percpu_elem() local
292 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_percpu_elem()
295 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); in percpu_array_map_lookup_percpu_elem()
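The per-CPU variants resolve the slot's per-CPU region with this_cpu_ptr(), or per_cpu_ptr() for an explicit CPU as in percpu_array_map_lookup_percpu_elem() (reachable from programs via the bpf_map_lookup_percpu_elem() helper). A BPF-side sketch of the common this-CPU case; the map name and attach point are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);
} pcpu_counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
        __u32 key = 0;
        /* resolved by percpu_array_map_lookup_elem(): this CPU's copy */
        __u64 *val = bpf_map_lookup_elem(&pcpu_counts, &key);

        if (val)
                (*val)++;       /* per-CPU slot, so no cross-CPU contention */
        return 0;
}

char LICENSE[] SEC("license") = "GPL";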
300 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy() local
306 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
313 size = array->elem_size; in bpf_percpu_array_copy()
315 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_copy()
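bpf_percpu_array_copy() backs the syscall-side read of a per-CPU array: one BPF_MAP_LOOKUP_ELEM returns every CPU's copy of the value, each padded to 8 bytes. A user-space sketch with libbpf, assuming the map holds u64 values (already 8-byte aligned):

#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Read one key of a BPF_MAP_TYPE_PERCPU_ARRAY: the kernel copies each
 * CPU's value into the user buffer, which is what
 * bpf_percpu_array_copy() implements on the kernel side. */
static int read_percpu_u64(int map_fd, __u32 key)
{
        int ncpus = libbpf_num_possible_cpus();
        __u64 *vals;

        if (ncpus < 0)
                return ncpus;
        vals = calloc(ncpus, sizeof(*vals));
        if (!vals)
                return -1;
        if (bpf_map_lookup_elem(map_fd, &key, vals) == 0)
                for (int cpu = 0; cpu < ncpus; cpu++)
                        printf("cpu%d: %llu\n", cpu, (unsigned long long)vals[cpu]);
        free(vals);
        return 0;
}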
328 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key() local
332 if (index >= array->map.max_entries) { in array_map_get_next_key()
337 if (index == array->map.max_entries - 1) in array_map_get_next_key()
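array_map_get_next_key() defines the iteration contract: an out-of-range key restarts at index 0, and the last valid key returns -ENOENT, which is what terminates the standard user-space walk:

#include <bpf/bpf.h>

/* Walk every key: bpf_map_get_next_key() keeps succeeding until
 * array_map_get_next_key() reaches max_entries - 1 and returns -ENOENT. */
static void walk_keys(int map_fd)
{
        __u32 key, next_key;
        __u32 *cur = NULL;      /* NULL start key: iteration begins at 0 */

        while (bpf_map_get_next_key(map_fd, cur, &next_key) == 0) {
                /* ... use next_key ... */
                key = next_key;
                cur = &key;
        }
}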
348 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem() local
356 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
368 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_update_elem()
369 val = this_cpu_ptr(array->pptrs[index & array->index_mask]); in array_map_update_elem()
371 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
373 val = array->value + in array_map_update_elem()
374 (u64)array->elem_size * (index & array->index_mask); in array_map_update_elem()
379 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
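array_map_update_elem() never creates or removes slots, since every index exists from creation: BPF_NOEXIST fails with -EEXIST, an out-of-range index with -E2BIG, and the update is an in-place copy followed by bpf_obj_free_fields() on the overwritten value (releasing timers, kptrs, and similar special fields). A user-space sketch:

#include <bpf/bpf.h>

/* All array slots exist from map creation, so BPF_ANY and BPF_EXIST
 * succeed while BPF_NOEXIST gets -EEXIST; deletion is likewise
 * unsupported (array_map_delete_elem() returns -EINVAL). */
static int set_slot(int map_fd, __u32 index, __u64 value)
{
        return bpf_map_update_elem(map_fd, &index, &value, BPF_ANY);
}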
387 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update() local
397 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
411 size = array->elem_size; in bpf_percpu_array_update()
413 pptr = array->pptrs[index & array->index_mask]; in bpf_percpu_array_update()
416 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_update()
429 static void *array_map_vmalloc_addr(struct bpf_array *array) in array_map_vmalloc_addr() argument
431 return (void *)round_down((unsigned long)array, PAGE_SIZE); in array_map_vmalloc_addr()
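BPF_F_MMAPABLE arrays are laid out so that array->value starts on a page boundary, with the struct header tucked just below it; array_map_vmalloc_addr() recovers the true vmalloc start by rounding the struct address down to a page. A sketch of that arithmetic, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL    /* assumed page size */

/* round_down(addr, PAGE_SIZE) for a power-of-two alignment: clear the
 * low bits, recovering the allocation start that precedes the header. */
static uintptr_t toy_vmalloc_addr(uintptr_t array_addr)
{
        return array_addr & ~(TOY_PAGE_SIZE - 1);
}

int main(void)
{
        uintptr_t addr = 0x7f0000001000UL - 64; /* hypothetical header address */

        printf("%#lx -> %#lx\n", (unsigned long)addr,
               (unsigned long)toy_vmalloc_addr(addr));
        return 0;
}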
436 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free_timers_wq() local
443 for (i = 0; i < array->map.max_entries; i++) { in array_map_free_timers_wq()
445 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
447 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
455 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free() local
459 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_free()
460 for (i = 0; i < array->map.max_entries; i++) { in array_map_free()
461 void __percpu *pptr = array->pptrs[i & array->index_mask]; in array_map_free()
470 for (i = 0; i < array->map.max_entries; i++) in array_map_free()
471 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); in array_map_free()
475 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
476 bpf_array_free_percpu(array); in array_map_free()
478 if (array->map.map_flags & BPF_F_MMAPABLE) in array_map_free()
479 bpf_map_area_free(array_map_vmalloc_addr(array)); in array_map_free()
481 bpf_map_area_free(array); in array_map_free()
508 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_seq_show_elem() local
516 pptr = array->pptrs[index & array->index_mask]; in percpu_array_map_seq_show_elem()
551 /* bpf array can only take a u32 key. This check makes sure in array_map_check_btf()
562 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mmap() local
563 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; in array_map_mmap()
569 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) in array_map_mmap()
572 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), in array_map_mmap()
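array_map_mmap() checks the requested range against PAGE_ALIGN(max_entries * elem_size) and remaps the vmalloc region into the process; from user space that is a plain mmap() of the map fd, provided the map was created with BPF_F_MMAPABLE. A sketch assuming libbpf >= 0.7 for bpf_map_create():

#include <sys/mman.h>
#include <bpf/bpf.h>

/* Create an mmap-able array of 1024 u64s and map it read-write; offset
 * 0 of the mapping corresponds to array->value. */
static void *mmap_array_map(int *out_fd)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_arr",
                                sizeof(__u32), sizeof(__u64), 1024, &opts);

        if (fd < 0)
                return NULL;
        *out_fd = fd;
        /* length must fit within PAGE_ALIGN(max_entries * elem_size),
         * the check array_map_mmap() performs */
        return mmap(NULL, 1024 * sizeof(__u64), PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
}

Writes through the returned pointer are visible to programs reading the map and vice versa, since both sides touch the same pages.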
595 struct bpf_array *array; in bpf_array_map_seq_start() local
603 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_start()
604 index = info->index & array->index_mask; in bpf_array_map_seq_start()
606 return (void *)(uintptr_t)array->pptrs[index]; in bpf_array_map_seq_start()
607 return array_map_elem_ptr(array, index); in bpf_array_map_seq_start()
614 struct bpf_array *array; in bpf_array_map_seq_next() local
622 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_next()
623 index = info->index & array->index_mask; in bpf_array_map_seq_next()
625 return (void *)(uintptr_t)array->pptrs[index]; in bpf_array_map_seq_next()
626 return array_map_elem_ptr(array, index); in bpf_array_map_seq_next()
634 struct bpf_array *array = container_of(map, struct bpf_array, map); in __bpf_array_map_seq_show() local
655 size = array->elem_size; in __bpf_array_map_seq_show()
685 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_iter_init_array_map() local
690 buf_size = array->elem_size * num_possible_cpus(); in bpf_iter_init_array_map()
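bpf_array_map_seq_start/next feed the bpf_map_elem iterator, handing out either the per-CPU pointer or the plain element pointer for each index, while bpf_iter_init_array_map() sizes a per-iterator copy buffer for the per-CPU case. A hedged BPF-side sketch of a program consuming this iterator over a u64-valued array (assumes a vmlinux.h CO-RE type dump for struct bpf_iter__bpf_map_elem):

#include "vmlinux.h"            /* assumed CO-RE type dump for the iter context */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>    /* BPF_SEQ_PRINTF */

SEC("iter/bpf_map_elem")
int dump_array(struct bpf_iter__bpf_map_elem *ctx)
{
        __u32 *key = ctx->key;
        __u64 *val = ctx->value;

        if (key && val) /* both are NULL on the final (post-last) call */
                BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";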
733 struct bpf_array *array; in bpf_for_each_array_elem() local
742 array = container_of(map, struct bpf_array, map); in bpf_for_each_array_elem()
747 val = this_cpu_ptr(array->pptrs[i]); in bpf_for_each_array_elem()
749 val = array_map_elem_ptr(array, i); in bpf_for_each_array_elem()
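bpf_for_each_array_elem() implements the bpf_for_each_map_elem() helper for arrays: it walks every index and calls the program-supplied callback with (map, &key, value, ctx), using the this-CPU value for per-CPU arrays. A BPF-side sketch, with illustrative map and callback names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 16);
        __type(key, __u32);
        __type(value, __u64);
} vals SEC(".maps");

/* Callback shape required by bpf_for_each_map_elem(): return 0 to
 * continue the walk, 1 to stop early. */
static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
        *(__u64 *)ctx += *val;
        return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_all(void *ctx)
{
        __u64 sum = 0;

        bpf_for_each_map_elem(&vals, sum_cb, &sum, 0);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";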
766 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mem_usage() local
768 u32 elem_size = array->elem_size; in array_map_mem_usage()
770 u64 usage = sizeof(*array); in array_map_mem_usage()
847 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free() local
851 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
852 BUG_ON(array->ptrs[i] != NULL); in fd_array_map_free()
854 bpf_map_area_free(array); in fd_array_map_free()
886 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem() local
893 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
902 mutex_lock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
903 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
905 mutex_unlock(&array->aux->poke_mutex); in bpf_fd_array_map_update_elem()
907 old_ptr = xchg(array->ptrs + index, new_ptr); in bpf_fd_array_map_update_elem()
917 struct bpf_array *array = container_of(map, struct bpf_array, map); in __fd_array_map_delete_elem() local
921 if (index >= array->map.max_entries) in __fd_array_map_delete_elem()
925 mutex_lock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
926 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
928 mutex_unlock(&array->aux->poke_mutex); in __fd_array_map_delete_elem()
930 old_ptr = xchg(array->ptrs + index, NULL); in __fd_array_map_delete_elem()
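Both the fd-array update and delete paths publish the new slot value with xchg() (taking aux->poke_mutex first for prog arrays, so tail-call poke descriptors stay consistent) and then drop the reference held by whatever pointer came out. A minimal user-space rendering of that swap-then-release pattern with C11 atomics; names are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) slots[64];       /* stand-in for array->ptrs[] */

static void put_object(void *obj)       /* stand-in for the map's fd put callback */
{
        free(obj);
}

/* Publish new_obj at index and release whatever was there before;
 * concurrent readers see either the old or the new pointer, never a
 * torn value, mirroring the kernel's xchg() on array->ptrs + index. */
static void replace_slot(unsigned int index, void *new_obj)
{
        void *old = atomic_exchange(&slots[index], new_obj);

        if (old)
                put_object(old);
}

Deletion is the same swap with NULL, as in __fd_array_map_delete_elem().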
976 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear() local
979 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
1260 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release() local
1268 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
1269 ee = READ_ONCE(array->ptrs[i]); in perf_event_fd_array_release()
1377 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup() local
1378 u32 elem_size = array->elem_size; in array_of_map_gen_lookup()
1388 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); in array_of_map_gen_lookup()
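array_of_map_gen_lookup() inlines the outer lookup of a BPF_MAP_TYPE_ARRAY_OF_MAPS: the same bounds-check/mask/scale sequence as array_map_gen_lookup(), followed by loading the inner map pointer stored in the slot. From user space, creating such a map needs a template inner map fd; a sketch with libbpf, names illustrative:

#include <bpf/bpf.h>

/* Create an array-of-maps whose slots hold array maps of four u32s;
 * the inner fd only serves as a type template at creation time. */
static int create_map_in_map(void)
{
        int inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                      sizeof(__u32), sizeof(__u32), 4, NULL);
        if (inner_fd < 0)
                return inner_fd;

        LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
        /* slots are later filled with map fds via bpf_map_update_elem() */
        return bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
                              sizeof(__u32), sizeof(__u32), 8, &opts);
}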