
Search results for identifier refs:bkt (results 1 – 25 of 65), sorted by relevance

/linux-6.12.1/include/linux/
hashtable.h:126 #define hash_for_each(name, bkt, obj, member) \ argument
127 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
128 (bkt)++)\
129 hlist_for_each_entry(obj, &name[bkt], member)
138 #define hash_for_each_rcu(name, bkt, obj, member) \ argument
139 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
140 (bkt)++)\
141 hlist_for_each_entry_rcu(obj, &name[bkt], member)
152 #define hash_for_each_safe(name, bkt, tmp, obj, member) \ argument
153 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
[all …]
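
The hash_for_each(), hash_for_each_rcu() and hash_for_each_safe() macros above walk every bucket of a fixed-size hashtable.h table: bkt is the caller-supplied bucket index, and the "obj == NULL && (bkt) < HASH_SIZE(name)" test only advances the outer loop once the inner hlist walk has exhausted a bucket. A minimal usage sketch (the struct, table name and sizes are illustrative, not taken from the listing):

#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct item {
	int key;
	struct hlist_node node;		/* links the item into its bucket */
};

static DEFINE_HASHTABLE(items, 4);	/* 2^4 = 16 buckets */

static void walk_items(void)
{
	struct item *it;
	int bkt;

	/* bkt receives the bucket index, it the current entry */
	hash_for_each(items, bkt, it, node)
		pr_info("bucket %d: key %d\n", bkt, it->key);
}

static void drop_items(void)
{
	struct hlist_node *tmp;
	struct item *it;
	int bkt;

	/* the _safe variant tolerates hash_del() during the walk */
	hash_for_each_safe(items, bkt, tmp, it, node) {
		hash_del(&it->node);
		kfree(it);
	}
}
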
rhashtable.h:327 struct rhash_lock_head __rcu **bkt) in rht_lock() argument
332 bit_spin_lock(0, (unsigned long *)bkt); in rht_lock()
350 struct rhash_lock_head __rcu **bkt, in rht_unlock() argument
354 bit_spin_unlock(0, (unsigned long *)bkt); in rht_unlock()
359 struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) in __rht_ptr() argument
363 (unsigned long)RHT_NULLS_MARKER(bkt)); in __rht_ptr()
374 struct rhash_lock_head __rcu *const *bkt) in rht_ptr_rcu() argument
376 return __rht_ptr(rcu_dereference(*bkt), bkt); in rht_ptr_rcu()
380 struct rhash_lock_head __rcu *const *bkt, in rht_ptr() argument
384 return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); in rht_ptr()
[all …]
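
In rhashtable.h, bkt is not an index but a pointer to the bucket head itself: rht_lock()/rht_unlock() take the per-bucket lock by bit-spinning on bit 0 of that pointer, and rht_ptr()/rht_ptr_rcu() mask the lock bit off (or substitute the nulls marker) before the chain is walked. Callers normally never touch the bucket pointer; they go through the resizable-hashtable API, roughly as in this hedged sketch (object layout, names and params are illustrative):

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct flow {
	u32 id;				/* lookup key */
	struct rhash_head node;		/* linkage owned by the rhashtable */
};

static const struct rhashtable_params flow_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct flow, id),
	.head_offset		= offsetof(struct flow, node),
	.automatic_shrinking	= true,
};

static int flow_table_demo(void)
{
	struct rhashtable ht;
	struct flow *f, *hit;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &flow_params);
	if (err)
		return err;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f) {
		err = -ENOMEM;
		goto out;
	}
	f->id = key;

	/* insert/lookup/remove only lock the single bucket they touch */
	err = rhashtable_insert_fast(&ht, &f->node, flow_params);
	hit = rhashtable_lookup_fast(&ht, &key, flow_params);
	if (hit)
		rhashtable_remove_fast(&ht, &hit->node, flow_params);
	kfree(f);
out:
	rhashtable_destroy(&ht);
	return err;
}
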
/linux-6.12.1/fs/smb/client/
compress.c:63 static bool has_low_entropy(struct bucket *bkt, size_t slen) in has_low_entropy() argument
71 for (i = 0; i < 256 && bkt[i].count > 0; i++) { in has_low_entropy()
72 p = bkt[i].count; in has_low_entropy()
98 static int calc_byte_distribution(struct bucket *bkt, size_t slen) in calc_byte_distribution() argument
105 sum += bkt[i].count; in calc_byte_distribution()
110 for (; i < high && bkt[i].count > 0; i++) { in calc_byte_distribution()
111 sum += bkt[i].count; in calc_byte_distribution()
125 static bool is_mostly_ascii(const struct bucket *bkt) in is_mostly_ascii() argument
131 if (bkt[i].count > 0) in is_mostly_ascii()
226 struct bucket *bkt = NULL; in is_compressible() local
[all …]
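
is_compressible() in compress.c tallies byte values into 256 struct bucket counters and then applies three tests: has_low_entropy() computes an entropy figure over the counts, calc_byte_distribution() checks how few distinct byte values carry most of the sample, and is_mostly_ascii() looks at which buckets are populated at all. A rough userspace illustration of the entropy part of that heuristic (threshold and names are made up; the kernel code works in integer arithmetic, not floating point):

#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Shannon entropy in bits per byte; 8.0 means the byte values look like noise. */
static double sample_entropy(const uint8_t *buf, size_t len)
{
	size_t count[256] = { 0 };
	double h = 0.0;
	size_t i;

	for (i = 0; i < len; i++)
		count[buf[i]]++;

	for (i = 0; i < 256; i++) {
		if (count[i]) {
			double p = (double)count[i] / len;
			h -= p * log2(p);
		}
	}
	return h;
}

/* Illustrative cutoff: anything well below 8 bits/byte is worth compressing. */
static bool looks_compressible(const uint8_t *buf, size_t len)
{
	return len > 0 && sample_entropy(buf, len) < 6.0;	/* link with -lm */
}
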
/linux-6.12.1/tools/include/linux/
hashtable.h:105 #define hash_for_each(name, bkt, obj, member) \ argument
106 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
107 (bkt)++)\
108 hlist_for_each_entry(obj, &name[bkt], member)
119 #define hash_for_each_safe(name, bkt, tmp, obj, member) \ argument
120 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
121 (bkt)++)\
122 hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/linux-6.12.1/tools/lib/bpf/
hashmap.h:168 #define hashmap__for_each_entry(map, cur, bkt) \ argument
169 for (bkt = 0; bkt < map->cap; bkt++) \
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
180 #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ argument
181 for (bkt = 0; bkt < map->cap; bkt++) \
182 for (cur = map->buckets[bkt]; \
hashmap.c:66 size_t bkt; in hashmap__clear() local
68 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap__clear()
106 size_t h, bkt; in hashmap_grow() local
117 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap_grow()
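
For the libbpf hashmap, bkt is simply the index into the bucket array that hashmap__for_each_entry()/_safe() scan, with cur walking the chain inside each bucket; hashmap__clear() and hashmap_grow() use the _safe form because they unlink entries while iterating. A hedged usage sketch, assuming the long-keyed callback signatures currently in tools/lib/bpf/hashmap.h:

#include <stdio.h>
#include "hashmap.h"	/* tools/lib/bpf/hashmap.h */

/* Hash and equality callbacks; signatures assumed from the long-keyed API. */
static size_t id_hash(long key, void *ctx)
{
	return (size_t)key;
}

static bool id_equal(long a, long b, void *ctx)
{
	return a == b;
}

static void dump(struct hashmap *map)
{
	struct hashmap_entry *cur;
	size_t bkt;

	/* bkt is the bucket index, cur the entry inside that bucket */
	hashmap__for_each_entry(map, cur, bkt)
		printf("bucket %zu: key %ld -> value %ld\n",
		       bkt, cur->key, cur->value);
}

static int demo(void)
{
	/* error handling elided; hashmap__new() reports failure via ERR_PTR */
	struct hashmap *map = hashmap__new(id_hash, id_equal, NULL);
	int err;

	err = hashmap__add(map, 1, 100);
	if (!err)
		dump(map);
	hashmap__free(map);
	return err;
}
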
/linux-6.12.1/tools/perf/util/
hashmap.h:168 #define hashmap__for_each_entry(map, cur, bkt) \ argument
169 for (bkt = 0; bkt < map->cap; bkt++) \
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
180 #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ argument
181 for (bkt = 0; bkt < map->cap; bkt++) \
182 for (cur = map->buckets[bkt]; \
expr.c:82 size_t bkt; in ids__free() local
87 hashmap__for_each_entry(ids, cur, bkt) { in ids__free()
111 size_t bkt; in ids__union() local
129 hashmap__for_each_entry(ids2, cur, bkt) { in ids__union()
228 size_t bkt; in expr__subset_of_ids() local
231 hashmap__for_each_entry(needles->ids, cur, bkt) { in expr__subset_of_ids()
310 size_t bkt; in expr__ctx_clear() local
312 hashmap__for_each_entry(ctx->ids, cur, bkt) { in expr__ctx_clear()
322 size_t bkt; in expr__ctx_free() local
328 hashmap__for_each_entry(ctx->ids, cur, bkt) { in expr__ctx_free()
threads.c:140 size_t bkt; in threads__remove_all_threads() local
144 hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) { in threads__remove_all_threads()
175 size_t bkt; in threads__for_each_thread() local
178 hashmap__for_each_entry((&table->shard), cur, bkt) { in threads__for_each_thread()
hashmap.c:66 size_t bkt; in hashmap__clear() local
68 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap__clear()
106 size_t h, bkt; in hashmap_grow() local
117 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap_grow()
/linux-6.12.1/drivers/infiniband/ulp/opa_vnic/
opa_vnic_internal.h:280 #define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \ argument
281 for ((bkt) = 0, obj = NULL; \
282 !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
283 hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
289 #define vnic_hash_for_each(name, bkt, obj, member) \ argument
290 for ((bkt) = 0, obj = NULL; \
291 !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
292 hlist_for_each_entry(obj, &name[bkt], member)
opa_vnic_encap.c:107 int bkt; in opa_vnic_free_mac_tbl() local
112 vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) { in opa_vnic_free_mac_tbl()
157 int bkt; in opa_vnic_query_mac_tbl() local
168 vnic_hash_for_each(mactbl, bkt, node, hlist) { in opa_vnic_query_mac_tbl()
209 int i, bkt, rc = 0; in opa_vnic_update_mac_tbl() local
263 vnic_hash_for_each(old_mactbl, bkt, node, hlist) { in opa_vnic_update_mac_tbl()
/linux-6.12.1/lib/
hashtable_test.c:68 int bkt; in hashtable_test_hash_add() local
80 hash_for_each(hash, bkt, x, node) { in hashtable_test_hash_add()
127 int bkt, i, j, count; in hashtable_test_hash_for_each() local
139 hash_for_each(hash, bkt, x, node) { in hashtable_test_hash_for_each()
157 int bkt, i, j, count; in hashtable_test_hash_for_each_safe() local
169 hash_for_each_safe(hash, bkt, tmp, x, node) { in hashtable_test_hash_for_each_safe()
190 int bkt, i, j, count; in hashtable_test_hash_for_each_possible() local
220 hash_for_each(hash, bkt, y, node) { in hashtable_test_hash_for_each_possible()
223 buckets[y->key] = bkt; in hashtable_test_hash_for_each_possible()
245 int bkt, i, j, count; in hashtable_test_hash_for_each_possible_safe() local
[all …]
rhashtable.c:229 struct rhash_lock_head __rcu **bkt, in rhashtable_rehash_one() argument
245 rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash), in rhashtable_rehash_one()
274 rht_assign_locked(bkt, next); in rhashtable_rehash_one()
284 struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); in rhashtable_rehash_chain() local
288 if (!bkt) in rhashtable_rehash_chain()
290 flags = rht_lock(old_tbl, bkt); in rhashtable_rehash_chain()
292 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) in rhashtable_rehash_chain()
297 rht_unlock(old_tbl, bkt, flags); in rhashtable_rehash_chain()
495 struct rhash_lock_head __rcu **bkt, in rhashtable_lookup_one() argument
508 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { in rhashtable_lookup_one()
[all …]
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/
hashmap.c:47 int err, bkt, found_cnt, i; in test_hashmap_generic() local
92 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
140 hashmap__for_each_entry_safe(map, entry, tmp, bkt) { in test_hashmap_generic()
197 hashmap__for_each_entry_safe(map, entry, tmp, bkt) { in test_hashmap_generic()
230 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
238 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
265 int err, i, bkt; in test_hashmap_ptr_iface() local
319 hashmap__for_each_entry(map, cur, bkt) { in test_hashmap_ptr_iface()
345 int err, bkt; in test_hashmap_multimap() local
382 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_multimap()
[all …]
/linux-6.12.1/drivers/s390/crypto/
ap_card.c:80 int bkt; in request_count_store() local
85 hash_for_each(ap_queues, bkt, aq, hnode) in request_count_store()
99 int bkt; in requestq_count_show() local
106 hash_for_each(ap_queues, bkt, aq, hnode) in requestq_count_show()
118 int bkt; in pendingq_count_show() local
125 hash_for_each(ap_queues, bkt, aq, hnode) in pendingq_count_show()
/linux-6.12.1/net/ipv6/
calipso.c:203 u32 bkt; in calipso_cache_check() local
212 bkt = hash & (CALIPSO_CACHE_BUCKETS - 1); in calipso_cache_check()
213 spin_lock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
214 list_for_each_entry(entry, &calipso_cache[bkt].list, list) { in calipso_cache_check()
224 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
239 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
244 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
268 u32 bkt; in calipso_cache_add() local
291 bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1); in calipso_cache_add()
292 spin_lock_bh(&calipso_cache[bkt].lock); in calipso_cache_add()
[all …]
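
calipso_cache_check() and calipso_cache_add() (like cipso_v4_cache_check()/_add() further down) pick a bucket by masking the precomputed hash with CALIPSO_CACHE_BUCKETS - 1, which is only correct because the bucket count is a power of two; each bucket carries its own spinlock, so lookups that land in different buckets never contend. A tiny illustration of that index calculation (the constant is illustrative):

#include <stdint.h>

#define CACHE_BUCKETS 1024	/* must stay a power of two for the mask trick */

/* Equivalent to hash % CACHE_BUCKETS, but a single AND instead of a divide. */
static inline uint32_t cache_bucket(uint32_t hash)
{
	return hash & (CACHE_BUCKETS - 1);
}
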
/linux-6.12.1/drivers/net/ethernet/intel/ice/
ice_vf_lib.h:197 #define ice_for_each_vf(pf, bkt, vf) \ argument
198 hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
214 #define ice_for_each_vf_rcu(pf, bkt, vf) \ argument
215 hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
ice_sriov.c:29 unsigned int bkt; in ice_free_vf_entries() local
37 hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { in ice_free_vf_entries()
156 unsigned int bkt; in ice_free_vfs() local
175 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
584 unsigned int bkt, it_cnt; in ice_start_vfs() local
591 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
619 ice_for_each_vf(pf, bkt, vf) { in ice_start_vfs()
1010 int to_remap = 0, bkt; in ice_sriov_remap_vectors() local
1015 ice_for_each_vf(pf, bkt, tmp_vf) { in ice_sriov_remap_vectors()
1215 unsigned int bkt; in ice_process_vflr_event() local
[all …]
ice_vf_lib.c:108 unsigned int bkt; in ice_get_num_vfs() local
112 ice_for_each_vf_rcu(pf, bkt, vf) in ice_get_num_vfs()
579 unsigned int bkt; in ice_is_any_vf_in_unicast_promisc() local
582 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_is_any_vf_in_unicast_promisc()
735 unsigned int bkt; in ice_reset_all_vfs() local
744 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
754 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
761 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
773 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
1363 unsigned int bkt; in ice_get_vf_ctrl_vsi() local
[all …]
/linux-6.12.1/net/ipv4/
cipso_ipv4.c:237 u32 bkt; in cipso_v4_cache_check() local
246 bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); in cipso_v4_cache_check()
247 spin_lock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
248 list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { in cipso_v4_cache_check()
258 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
273 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
278 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
301 u32 bkt; in cipso_v4_cache_add() local
324 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); in cipso_v4_cache_add()
325 spin_lock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_add()
[all …]
/linux-6.12.1/net/sched/
cls_route.c:55 struct route4_bucket *bkt; member
328 b = f->bkt; in route4_delete()
465 f->bkt = b; in route4_set_parms()
524 f->bkt = fold->bkt; in route4_change()
534 fp = &f->bkt->ht[h]; in route4_change()
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/lib/
vxlan.c:183 int bkt; in mlx5_vxlan_reset_to_default() local
188 hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { in mlx5_vxlan_reset_to_default()
/linux-6.12.1/arch/powerpc/kvm/
book3s_hv_uvmem.c:470 int srcu_idx, bkt; in kvmppc_h_svm_init_start() local
489 kvm_for_each_memslot(memslot, bkt, slots) { in kvmppc_h_svm_init_start()
497 kvm_for_each_memslot(m, bkt, slots) { in kvmppc_h_svm_init_start()
661 int srcu_idx, bkt; in kvmppc_h_svm_init_abort() local
676 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) in kvmppc_h_svm_init_abort()
834 int srcu_idx, bkt; in kvmppc_h_svm_init_done() local
843 kvm_for_each_memslot(memslot, bkt, slots) { in kvmppc_h_svm_init_done()
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/
htb.c:37 int bkt, err; in mlx5e_htb_enumerate_leaves() local
39 hash_for_each(htb->qos_tc2node, bkt, node, hnode) { in mlx5e_htb_enumerate_leaves()
434 int bkt; in mlx5e_htb_node_find_by_qid() local
436 hash_for_each(htb->qos_tc2node, bkt, node, hnode) in mlx5e_htb_node_find_by_qid()
627 int bkt; in mlx5e_htb_update_children() local
629 hash_for_each(htb->qos_tc2node, bkt, child, hnode) { in mlx5e_htb_update_children()
