/linux-6.12.1/drivers/md/dm-vdo/

D | int-map.c
     73  struct __packed bucket {  [struct]
    102  struct bucket *buckets;
    167  return vdo_allocate(map->bucket_count, struct bucket,  [in allocate_buckets()]
    244  static struct bucket *dereference_hop(struct bucket *neighborhood, unsigned int hop_offset)  [in dereference_hop()]
    260  static void insert_in_hop_list(struct bucket *neighborhood, struct bucket *new_bucket)  [in insert_in_hop_list()]
    276  struct bucket *bucket = dereference_hop(neighborhood, next_hop);  [in insert_in_hop_list(), local]
    278  next_hop = bucket->next_hop;  [in insert_in_hop_list()]
    282  bucket->next_hop = hop_offset;  [in insert_in_hop_list()]
    293  static struct bucket *select_bucket(const struct int_map *map, u64 key)  [in select_bucket()]
    324  static struct bucket *search_hop_list(struct int_map *map __always_unused,  [in search_hop_list()]
    [all …]
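These hits show dm-vdo's hopscotch-style hash map: each bucket carries hop offsets chaining the entries that hash to the same neighborhood. A minimal userspace sketch of the hop-list walk follows; the 1-based offset encoding with 0 as list terminator, and the exact field layout, are assumptions for illustration rather than a copy of int-map.c.

    #include <stddef.h>
    #include <stdint.h>

    struct bucket {
        uint8_t first_hop;  /* 1-based offset of the first entry, 0 = empty */
        uint8_t next_hop;   /* 1-based offset of the next entry in the list */
        uint64_t key;
        void *value;
    };

    /* Walk the hop list anchored at @neighborhood looking for @key,
     * mirroring the dereference_hop()/search_hop_list() pattern above. */
    static struct bucket *search_hop_list(struct bucket *neighborhood, uint64_t key)
    {
        unsigned int hop = neighborhood->first_hop;

        while (hop != 0) {
            struct bucket *b = &neighborhood[hop - 1];

            if (b->value != NULL && b->key == key)
                return b;
            hop = b->next_hop;
        }
        return NULL;
    }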
D | priority-table.c
     23  struct bucket {  [struct]
     44  struct bucket buckets[];
     64  struct bucket, __func__, &table);  [in vdo_make_priority_table()]
     69  struct bucket *bucket = &table->buckets[priority];  [in vdo_make_priority_table(), local]
     71  bucket->priority = priority;  [in vdo_make_priority_table()]
     72  INIT_LIST_HEAD(&bucket->queue);  [in vdo_make_priority_table()]
    140  static inline void mark_bucket_empty(struct priority_table *table, struct bucket *bucket)  [in mark_bucket_empty(), argument]
    142  table->search_vector &= ~(1ULL << bucket->priority);  [in mark_bucket_empty()]
    157  struct bucket *bucket;  [in vdo_priority_table_dequeue(), local]
    173  bucket = &table->buckets[top_priority];  [in vdo_priority_table_dequeue()]
    [all …]
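mark_bucket_empty() above maintains a 64-bit search_vector in which bit p is set while bucket p holds entries, so dequeue can find the highest non-empty priority with one find-last-set. A hedged sketch of that lookup, using the GCC/Clang builtin in place of the kernel's fls64():

    #include <stdint.h>

    /* Highest-priority non-empty bucket, or -1 if all 64 are empty.
     * Bit p of @search_vector is set while bucket p has queued entries. */
    static int top_priority(uint64_t search_vector)
    {
        if (search_vector == 0)
            return -1;
        return 63 - __builtin_clzll(search_vector);
    }

    /* When a bucket drains, its bit is cleared, as in mark_bucket_empty():
     *     search_vector &= ~(1ULL << priority);
     */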
/linux-6.12.1/net/mptcp/

D | token.c
    111  struct token_bucket *bucket;  [in mptcp_token_new_request(), local]
    122  bucket = token_bucket(token);  [in mptcp_token_new_request()]
    123  spin_lock_bh(&bucket->lock);  [in mptcp_token_new_request()]
    124  if (__token_bucket_busy(bucket, token)) {  [in mptcp_token_new_request()]
    125  spin_unlock_bh(&bucket->lock);  [in mptcp_token_new_request()]
    129  hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);  [in mptcp_token_new_request()]
    130  bucket->chain_len++;  [in mptcp_token_new_request()]
    131  spin_unlock_bh(&bucket->lock);  [in mptcp_token_new_request()]
    157  struct token_bucket *bucket;  [in mptcp_token_new_connect(), local]
    163  bucket = token_bucket(subflow->token);  [in mptcp_token_new_connect()]
    [all …]
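The flow visible in mptcp_token_new_request() is a classic insert-if-absent under a per-bucket lock. A self-contained sketch of the same pattern, with a pthread mutex and a plain singly linked list standing in for the kernel's spin_lock_bh() and hlist_nulls; all names here are illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct node {
        uint32_t token;
        struct node *next;
    };

    struct token_bucket {
        pthread_mutex_t lock;  /* assumed initialized with PTHREAD_MUTEX_INITIALIZER */
        struct node *chain;
        unsigned int chain_len;
    };

    /* Insert @n unless its token is already present; returns false on
     * collision. Mirrors the lock/busy-check/add flow shown above. */
    static bool bucket_insert_unique(struct token_bucket *b, struct node *n)
    {
        bool busy = false;

        pthread_mutex_lock(&b->lock);
        for (struct node *c = b->chain; c; c = c->next)
            if (c->token == n->token)
                busy = true;
        if (!busy) {
            n->next = b->chain;
            b->chain = n;
            b->chain_len++;
        }
        pthread_mutex_unlock(&b->lock);
        return !busy;
    }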
/linux-6.12.1/net/ceph/crush/

D | mapper.c
     74  static int bucket_perm_choose(const struct crush_bucket *bucket,  [in bucket_perm_choose(), argument]
     78  unsigned int pr = r % bucket->size;  [in bucket_perm_choose()]
     83  dprintk("bucket %d new x=%d\n", bucket->id, x);  [in bucket_perm_choose()]
     88  s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %  [in bucket_perm_choose()]
     89  bucket->size;  [in bucket_perm_choose()]
     95  for (i = 0; i < bucket->size; i++)  [in bucket_perm_choose()]
    100  for (i = 1; i < bucket->size; i++)  [in bucket_perm_choose()]
    112  if (p < bucket->size - 1) {  [in bucket_perm_choose()]
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %  [in bucket_perm_choose()]
    114  (bucket->size - p);  [in bucket_perm_choose()]
    [all …]
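bucket_perm_choose() draws the r-th element of a pseudo-random permutation of the bucket's items, seeded by (x, bucket->id) and settled lazily with crush_hash32_3(). A heavily simplified sketch of that idea follows (lazy Fisher-Yates, with mix() as a stand-in hash; the real function also caches the partially built permutation between calls, which this sketch omits). Assumes 1 <= size <= MAX_SIZE:

    #include <stdint.h>

    #define MAX_SIZE 64 /* sketch-only bound on bucket->size */

    /* Stand-in for crush_hash32_3(); any avalanching mix works here. */
    static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ c * 0xc2b2ae35u;
        return h ^ (h >> 16);
    }

    /* Item r of a pseudo-random permutation of [0, size): only the first
     * pr+1 slots are ever decided (lazy Fisher-Yates). */
    static unsigned int perm_choose(uint32_t x, int id,
                                    unsigned int size, unsigned int r)
    {
        unsigned int perm[MAX_SIZE], pr = r % size, t;

        for (unsigned int i = 0; i < size; i++)
            perm[i] = i;
        for (unsigned int p = 0; p <= pr; p++) {
            unsigned int j = p + mix(x, (uint32_t)id, p) % (size - p);

            t = perm[p];
            perm[p] = perm[j];
            perm[j] = t;
        }
        return perm[pr];
    }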
/linux-6.12.1/drivers/interconnect/qcom/

D | bcm-voter.c
     65  int bucket, i;  [in bcm_aggregate_mask(), local]
     67  for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {  [in bcm_aggregate_mask()]
     68  bcm->vote_x[bucket] = 0;  [in bcm_aggregate_mask()]
     69  bcm->vote_y[bucket] = 0;  [in bcm_aggregate_mask()]
     75  if (node->sum_avg[bucket] || node->max_peak[bucket]) {  [in bcm_aggregate_mask()]
     76  bcm->vote_x[bucket] = 0;  [in bcm_aggregate_mask()]
     77  bcm->vote_y[bucket] = bcm->enable_mask;  [in bcm_aggregate_mask()]
     94  size_t i, bucket;  [in bcm_aggregate(), local]
     99  for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {  [in bcm_aggregate()]
    102  temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,  [in bcm_aggregate()]
    [all …]
/linux-6.12.1/block/

D | blk-stat.c
     55  int bucket, cpu;  [in blk_stat_add(), local]
     66  bucket = cb->bucket_fn(rq);  [in blk_stat_add()]
     67  if (bucket < 0)  [in blk_stat_add()]
     70  stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];  [in blk_stat_add()]
     80  unsigned int bucket;  [in blk_stat_timer_fn(), local]
     83  for (bucket = 0; bucket < cb->buckets; bucket++)  [in blk_stat_timer_fn()]
     84  blk_rq_stat_init(&cb->stat[bucket]);  [in blk_stat_timer_fn()]
     90  for (bucket = 0; bucket < cb->buckets; bucket++) {  [in blk_stat_timer_fn()]
     91  blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);  [in blk_stat_timer_fn()]
     92  blk_rq_stat_init(&cpu_stat[bucket]);  [in blk_stat_timer_fn()]
    [all …]
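blk_stat_timer_fn() folds each CPU's per-bucket statistics into a global copy and then resets the per-CPU slots. The same two-level reduction in plain C; the array sizes and the stat fields are sketch-only stand-ins for the kernel's per-CPU machinery:

    #include <string.h>

    #define NR_CPUS    4  /* sketch-only */
    #define NR_BUCKETS 8  /* sketch-only */

    struct rq_stat { unsigned long nr, total; };

    /* Fold each CPU's per-bucket stats into @global, then zero the
     * per-CPU slots, mirroring the blk_stat_timer_fn() flow above. */
    static void fold_stats(struct rq_stat global[NR_BUCKETS],
                           struct rq_stat percpu[NR_CPUS][NR_BUCKETS])
    {
        memset(global, 0, sizeof(struct rq_stat) * NR_BUCKETS);
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            for (int b = 0; b < NR_BUCKETS; b++) {
                global[b].nr += percpu[cpu][b].nr;
                global[b].total += percpu[cpu][b].total;
                percpu[cpu][b] = (struct rq_stat){ 0, 0 };
            }
    }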
/linux-6.12.1/net/sched/

D | sch_hhf.c
    329  static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)  [in dequeue_head(), argument]
    331  struct sk_buff *skb = bucket->head;  [in dequeue_head()]
    333  bucket->head = skb->next;  [in dequeue_head()]
    339  static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)  [in bucket_add(), argument]
    341  if (bucket->head == NULL)  [in bucket_add()]
    342  bucket->head = skb;  [in bucket_add()]
    344  bucket->tail->next = skb;  [in bucket_add()]
    345  bucket->tail = skb;  [in bucket_add()]
    352  struct wdrr_bucket *bucket;  [in hhf_drop(), local]
    355  bucket = &q->buckets[WDRR_BUCKET_FOR_HH];  [in hhf_drop()]
    [all …]
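dequeue_head() and bucket_add() implement a plain head/tail FIFO of skbs per WDRR bucket. A completed, self-contained version of the pair, with a stub sk_buff; the NULL check on dequeue is an addition here, since the kernel caller only dequeues non-empty buckets:

    #include <stddef.h>

    struct sk_buff { struct sk_buff *next; };
    struct wdrr_bucket { struct sk_buff *head, *tail; };

    /* Pop the oldest skb, or NULL if the bucket is empty. */
    static struct sk_buff *dequeue_head(struct wdrr_bucket *b)
    {
        struct sk_buff *skb = b->head;

        if (skb) {
            b->head = skb->next;
            skb->next = NULL;
        }
        return skb;
    }

    /* Append @skb; an empty bucket gets it as head, else it chains
     * off the current tail, exactly as in the hits above. */
    static void bucket_add(struct wdrr_bucket *b, struct sk_buff *skb)
    {
        skb->next = NULL;
        if (b->head == NULL)
            b->head = skb;
        else
            b->tail->next = skb;
        b->tail = skb;
    }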
/linux-6.12.1/drivers/infiniband/sw/rdmavt/

D | trace_qp.h
     18  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     19  TP_ARGS(qp, bucket),
     23  __field(u32, bucket)
     28  __entry->bucket = bucket;
     34  __entry->bucket
     39  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     40  TP_ARGS(qp, bucket));
     43  TP_PROTO(struct rvt_qp *qp, u32 bucket),
     44  TP_ARGS(qp, bucket));
/linux-6.12.1/fs/bcachefs/

D | backpointers.h
     46  …line bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)  [in bp_pos_to_bucket_nodev_noerror(), argument]
     51  *bucket = bp_pos_to_bucket(ca, bp_pos);  [in bp_pos_to_bucket_nodev_noerror()]
     56  static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)  [in bp_pos_to_bucket_nodev(), argument]
     58  return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),  [in bp_pos_to_bucket_nodev()]
     63  struct bpos bucket,  [in bucket_pos_to_bp_noerror()]
     66  return POS(bucket.inode,  [in bucket_pos_to_bp_noerror()]
     67  (bucket_to_sector(ca, bucket.offset) <<  [in bucket_pos_to_bp_noerror()]
     75  struct bpos bucket,  [in bucket_pos_to_bp()]
     78  struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);  [in bucket_pos_to_bp()]
     79  EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));  [in bucket_pos_to_bp()]
    [all …]
D | alloc_background.h
     25  static inline u64 bucket_to_u64(struct bpos bucket)  [in bucket_to_u64(), argument]
     27  return (bucket.inode << 48) | bucket.offset;  [in bucket_to_u64()]
     30  static inline struct bpos u64_to_bucket(u64 bucket)  [in u64_to_bucket(), argument]
     32  return POS(bucket >> 48, bucket & ~(~0ULL << 48));  [in u64_to_bucket()]
     40  static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)  [in alloc_to_bucket()]
     50  static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)  [in __bucket_m_to_alloc()]
     60  static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)  [in bucket_m_to_alloc()]
     78  static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,  [in bucket_data_type_mismatch(), argument]
     81  return !data_type_is_empty(bucket) &&  [in bucket_data_type_mismatch()]
     82  bucket_data_type(bucket) != bucket_data_type(ptr);  [in bucket_data_type_mismatch()]
    [all …]
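bucket_to_u64()/u64_to_bucket() pack a bucket position into a single word: device index in the top 16 bits, bucket offset in the low 48. A round-trip check of exactly that packing, as a standalone program:

    #include <assert.h>
    #include <stdint.h>

    struct bpos { uint64_t inode, offset; };

    static uint64_t bucket_to_u64(struct bpos b)
    {
        return (b.inode << 48) | b.offset; /* device in bits 63..48 */
    }

    static struct bpos u64_to_bucket(uint64_t v)
    {
        return (struct bpos){ v >> 48, v & ~(~0ULL << 48) };
    }

    int main(void)
    {
        struct bpos b = { 3, 0x12345 };
        struct bpos r = u64_to_bucket(bucket_to_u64(b));

        assert(r.inode == b.inode && r.offset == b.offset);
        return 0;
    }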
D | alloc_foreground.h
    121  unsigned dev, u64 bucket)  [in open_bucket_hashslot(), argument]
    124  (jhash_3words(dev, bucket, bucket >> 32, 0) &  [in open_bucket_hashslot()]
    128  static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)  [in bch2_bucket_is_open(), argument]
    130  open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);  [in bch2_bucket_is_open()]
    135  if (ob->dev == dev && ob->bucket == bucket)  [in bch2_bucket_is_open()]
    144  static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)  [in bch2_bucket_is_open_safe(), argument]
    148  if (bch2_bucket_is_open(c, dev, bucket))  [in bch2_bucket_is_open_safe()]
    152  ret = bch2_bucket_is_open(c, dev, bucket);  [in bch2_bucket_is_open_safe()]
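open_bucket_hashslot() hashes the (device, bucket) pair into a power-of-two slot table with jhash_3words(). A sketch of the same slotting with a generic 64-bit mixer standing in for jhash_3words(); the table size is an assumption and must stay a power of two for the mask to work:

    #include <stdint.h>

    #define OPEN_BUCKETS_COUNT 1024 /* sketch-only; power of two */

    /* Stand-in mixer; jhash_3words() plays this role in the kernel. */
    static uint64_t hash_u64(uint64_t x)
    {
        x ^= x >> 33;
        x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33;
        return x;
    }

    /* Slot for the (dev, bucket) pair, masked into the table. */
    static unsigned int open_bucket_hashslot(unsigned int dev, uint64_t bucket)
    {
        return hash_u64(((uint64_t)dev << 32) ^ bucket) &
               (OPEN_BUCKETS_COUNT - 1);
    }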
D | backpointers.c
     20  struct bpos bucket,  [in extent_matches_bp(), argument]
     40  if (bpos_eq(bucket, bucket2) &&  [in extent_matches_bp()]
     70  struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p);  [in bch2_backpointer_validate(), local]
     71  struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset);  [in bch2_backpointer_validate()]
     98  struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);  [in bch2_backpointer_k_to_text(), local]
    101  bch2_bpos_to_text(out, bucket);  [in bch2_backpointer_k_to_text()]
    171  struct bpos bucket,  [in bch2_bucket_backpointer_mod_nowritebuffer(), argument]
    187  bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);  [in bch2_bucket_backpointer_mod_nowritebuffer()]
    224  struct bpos bucket, int gen,  [in bch2_get_next_backpointer(), argument]
    229  struct bpos bp_end_pos = bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0);  [in bch2_get_next_backpointer()]
    [all …]
/linux-6.12.1/fs/nfs/

D | nfs42xattr.c
     87  struct nfs4_xattr_bucket *bucket;  [member]
    238  entry->bucket = NULL;  [in nfs4_xattr_alloc_entry()]
    388  struct nfs4_xattr_bucket *bucket;  [in nfs4_xattr_discard_cache(), local]
    394  bucket = &cache->buckets[i];  [in nfs4_xattr_discard_cache()]
    396  spin_lock(&bucket->lock);  [in nfs4_xattr_discard_cache()]
    397  bucket->draining = true;  [in nfs4_xattr_discard_cache()]
    398  hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {  [in nfs4_xattr_discard_cache()]
    403  spin_unlock(&bucket->lock);  [in nfs4_xattr_discard_cache()]
    511  nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)  [in nfs4_xattr_get_entry(), argument]
    517  hlist_for_each_entry(entry, &bucket->hlist, hnode) {  [in nfs4_xattr_get_entry()]
    [all …]
D | pnfs_nfs.c
     63  pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)  [in pnfs_free_bucket_lseg(), argument]
     65  if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {  [in pnfs_free_bucket_lseg()]
     66  struct pnfs_layout_segment *freeme = bucket->lseg;  [in pnfs_free_bucket_lseg()]
     67  bucket->lseg = NULL;  [in pnfs_free_bucket_lseg()]
     81  struct pnfs_commit_bucket *bucket = NULL;  [in pnfs_generic_clear_request_commit(), local]
     87  bucket = list_first_entry(&req->wb_list,  [in pnfs_generic_clear_request_commit()]
     91  if (bucket)  [in pnfs_generic_clear_request_commit()]
     92  pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));  [in pnfs_generic_clear_request_commit()]
    241  pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,  [in pnfs_bucket_scan_ds_commit_list(), argument]
    245  struct list_head *src = &bucket->written;  [in pnfs_bucket_scan_ds_commit_list()]
    [all …]
/linux-6.12.1/net/9p/

D | error.c
    179  int bucket;  [in p9_error_init(), local]
    182  for (bucket = 0; bucket < ERRHASHSZ; bucket++)  [in p9_error_init()]
    183  INIT_HLIST_HEAD(&hash_errmap[bucket]);  [in p9_error_init()]
    188  bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;  [in p9_error_init()]
    190  hlist_add_head(&c->list, &hash_errmap[bucket]);  [in p9_error_init()]
    208  int bucket;  [in p9_errstr2errno(), local]
    212  bucket = jhash(errstr, len, 0) % ERRHASHSZ;  [in p9_errstr2errno()]
    213  hlist_for_each_entry(c, &hash_errmap[bucket], list) {  [in p9_errstr2errno()]
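p9_errstr2errno() picks a chain by jhash(errstr, len, 0) % ERRHASHSZ. A sketch of the same bucketing with a simple string hash in place of jhash(); the ERRHASHSZ value below is an assumption for the sketch:

    #include <stddef.h>
    #include <string.h>

    #define ERRHASHSZ 32 /* assumed table size */

    /* Stand-in for jhash(); any reasonable string hash works here. */
    static unsigned int str_hash(const char *s, size_t len)
    {
        unsigned int h = 5381;

        while (len--)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    /* Chain index for an error string, as in the hits above. */
    static unsigned int err_bucket(const char *errstr)
    {
        return str_hash(errstr, strlen(errstr)) % ERRHASHSZ;
    }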
/linux-6.12.1/drivers/md/bcache/

D | alloc.c
     76  uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  [in bch_inc_gen()]
     89  struct bucket *b;  [in bch_rescale_priorities()]
    125  static inline bool can_inc_bucket_gen(struct bucket *b)  [in can_inc_bucket_gen()]
    130  bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)  [in bch_can_invalidate_bucket()]
    137  void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  [in __bch_invalidate_one_bucket()]
    151  static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)  [in bch_invalidate_one_bucket()]
    167  static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)  [in new_bucket_prio()]
    176  struct bucket **lhs = (struct bucket **)l;  [in new_bucket_max_cmp()]
    177  struct bucket **rhs = (struct bucket **)r;  [in new_bucket_max_cmp()]
    185  struct bucket **lhs = (struct bucket **)l;  [in new_bucket_min_cmp()]
    [all …]
/linux-6.12.1/net/vmw_vsock/

D | diag.c
     52  unsigned int bucket;  [in vsock_diag_dump(), local]
     63  bucket = cb->args[1];  [in vsock_diag_dump()]
     72  while (bucket < ARRAY_SIZE(vsock_bind_table)) {  [in vsock_diag_dump()]
     73  struct list_head *head = &vsock_bind_table[bucket];  [in vsock_diag_dump()]
     94  bucket++;  [in vsock_diag_dump()]
     98  bucket = 0;  [in vsock_diag_dump()]
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {  [in vsock_diag_dump()]
    103  struct list_head *head = &vsock_connected_table[bucket];  [in vsock_diag_dump()]
    128  bucket++;  [in vsock_diag_dump()]
    135  cb->args[1] = bucket;  [in vsock_diag_dump()]
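vsock_diag_dump() keeps its position in cb->args[1] so an interrupted netlink dump resumes at the same bucket on the next call. The cursor pattern reduced to its core; table shapes are sketch-only and the state struct stands in for cb->args[]:

    #include <stdbool.h>

    #define NR_BUCKETS 16 /* sketch-only */
    #define BUCKET_LEN  4 /* entries per bucket, sketch-only */

    struct dump_state { unsigned int bucket, entry; };

    /* Emit entries until @quota runs out. Returns true when the whole
     * table was covered; otherwise @st records exactly where to resume,
     * so a later call picks up at the first unemitted entry. */
    static bool dump_resume(struct dump_state *st, unsigned int quota)
    {
        for (; st->bucket < NR_BUCKETS; st->bucket++, st->entry = 0) {
            for (; st->entry < BUCKET_LEN; st->entry++) {
                if (quota == 0)
                    return false; /* call again later */
                /* ... emit entry (st->bucket, st->entry) ... */
                quota--;
            }
        }
        return true;
    }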
/linux-6.12.1/drivers/cpuidle/governors/

D | menu.c
    109  unsigned int bucket;  [member]
    117  int bucket = 0;  [in which_bucket(), local]
    126  bucket = BUCKETS/2;  [in which_bucket()]
    129  return bucket;  [in which_bucket()]
    131  return bucket + 1;  [in which_bucket()]
    133  return bucket + 2;  [in which_bucket()]
    135  return bucket + 3;  [in which_bucket()]
    137  return bucket + 4;  [in which_bucket()]
    138  return bucket + 5;  [in which_bucket()]
    286  data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);  [in menu_select()]
    [all …]
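which_bucket() buckets the expected idle duration into a small ladder, and the hits show iowait pressure biasing the base by BUCKETS/2. A sketch of that laddering; the cutoffs below are illustrative guesses, not the governor's exact values:

    #include <stdbool.h>
    #include <stdint.h>

    #define BUCKETS 12 /* assumed to match the governor's table size */

    /* Longer expected idle periods land in higher buckets; iowait
     * pressure shifts the whole ladder up by BUCKETS/2. */
    static int which_bucket(uint64_t duration_ns, bool iowait)
    {
        int bucket = iowait ? BUCKETS / 2 : 0;

        if (duration_ns < 10ULL * 1000)      /* < 10 us */
            return bucket;
        if (duration_ns < 100ULL * 1000)     /* < 100 us */
            return bucket + 1;
        if (duration_ns < 1000ULL * 1000)    /* < 1 ms */
            return bucket + 2;
        if (duration_ns < 10000ULL * 1000)   /* < 10 ms */
            return bucket + 3;
        if (duration_ns < 100000ULL * 1000)  /* < 100 ms */
            return bucket + 4;
        return bucket + 5;
    }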
/linux-6.12.1/kernel/dma/

D | debug.c
    265  static void put_hash_bucket(struct hash_bucket *bucket,  [in put_hash_bucket(), argument]
    267  __releases(&bucket->lock)  [in put_hash_bucket()]
    269  spin_unlock_irqrestore(&bucket->lock, flags);  [in put_hash_bucket()]
    294  static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,  [in __hash_bucket_find(), argument]
    301  list_for_each_entry(entry, &bucket->list, list) {  [in __hash_bucket_find()]
    344  static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,  [in bucket_find_exact(), argument]
    347  return __hash_bucket_find(bucket, ref, exact_match);  [in bucket_find_exact()]
    350  static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,  [in bucket_find_contain(), argument]
    359  entry = __hash_bucket_find(*bucket, ref, containing_match);  [in bucket_find_contain()]
    367  put_hash_bucket(*bucket, *flags);  [in bucket_find_contain()]
    [all …]
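The get/put pair visible here follows a lock-and-return discipline: the lookup hands the bucket back already locked, and every caller must release it through the matching put helper. The same discipline with pthreads; the hash shift and table size are assumptions, and the entry list is omitted:

    #include <pthread.h>

    #define HASH_SIZE     1024 /* sketch-only */
    #define HASH_FN_SHIFT 13   /* assumed address-hash granularity */

    struct hash_bucket {
        pthread_mutex_t lock; /* assumed initialized at startup */
        /* entry list omitted in this sketch */
    };

    /* Hash @dev_addr to a bucket in @table and return it locked;
     * the caller must hand it back through put_hash_bucket(). */
    static struct hash_bucket *get_hash_bucket(struct hash_bucket *table,
                                               unsigned long dev_addr)
    {
        struct hash_bucket *b =
            &table[(dev_addr >> HASH_FN_SHIFT) % HASH_SIZE];

        pthread_mutex_lock(&b->lock);
        return b;
    }

    static void put_hash_bucket(struct hash_bucket *b)
    {
        pthread_mutex_unlock(&b->lock);
    }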
/linux-6.12.1/Documentation/userspace-api/media/v4l/

D | metafmt-vsp1-hgt.rst
     28  The Saturation position **n** (0 - 31) of the bucket in the matrix is
     33  The Hue position **m** (0 - 5) of the bucket in the matrix depends on
    101  - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
    103  - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
    107  - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
    109  - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
    113  - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
    117  - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
    121  - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
    125  - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
    [all …]
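Given the m-major layout in the listed table rows (6 hue rows m of 32 saturation columns n, one 32-bit word per bucket), the byte offset of bucket (m, n) within the bucket array works out as below; the header fields that precede the array in the buffer are not accounted for here:

    #include <stdint.h>

    /* Byte offset of histogram bucket (m, n) inside the bucket array,
     * assuming rows of 32 buckets stored m-major, 4 bytes per bucket. */
    static uint32_t hgt_bucket_offset(unsigned int m, unsigned int n)
    {
        return (m * 32 + n) * sizeof(uint32_t);
    }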
/linux-6.12.1/fs/ocfs2/

D | xattr.c
    121  struct ocfs2_xattr_bucket *bucket;  [member]
    275  struct ocfs2_xattr_bucket *bucket,
    297  struct ocfs2_xattr_bucket *bucket,
    318  struct ocfs2_xattr_bucket *bucket;  [in ocfs2_xattr_bucket_new(), local]
    323  bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);  [in ocfs2_xattr_bucket_new()]
    324  if (bucket) {  [in ocfs2_xattr_bucket_new()]
    325  bucket->bu_inode = inode;  [in ocfs2_xattr_bucket_new()]
    326  bucket->bu_blocks = blks;  [in ocfs2_xattr_bucket_new()]
    329  return bucket;  [in ocfs2_xattr_bucket_new()]
    332  static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)  [in ocfs2_xattr_bucket_relse(), argument]
    [all …]
/linux-6.12.1/drivers/md/

D | dm-clone-target.c
    564  #define bucket_lock_irqsave(bucket, flags) \  [argument]
    565  spin_lock_irqsave(&(bucket)->lock, flags)
    567  #define bucket_unlock_irqrestore(bucket, flags) \  [argument]
    568  spin_unlock_irqrestore(&(bucket)->lock, flags)
    570  #define bucket_lock_irq(bucket) \  [argument]
    571  spin_lock_irq(&(bucket)->lock)
    573  #define bucket_unlock_irq(bucket) \  [argument]
    574  spin_unlock_irq(&(bucket)->lock)
    579  struct hash_table_bucket *bucket;  [in hash_table_init(), local]
    588  bucket = clone->ht + i;  [in hash_table_init()]
    [all …]
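The bucket_lock_* wrappers above parenthesize their argument before taking the embedded spinlock, so callers can pass expressions such as clone->ht + i without precedence surprises. The same shape with a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>

    struct hash_table_bucket {
        pthread_mutex_t lock; /* assumed initialized at startup */
    };

    /* The parentheses around (bucket) keep `ht + i` style arguments safe. */
    #define bucket_lock(bucket)   pthread_mutex_lock(&(bucket)->lock)
    #define bucket_unlock(bucket) pthread_mutex_unlock(&(bucket)->lock)

    /* usage: bucket_lock(ht + i); ... bucket_unlock(ht + i); */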
/linux-6.12.1/tools/testing/selftests/drivers/net/hw/

D | ethtool_rmon.sh
     35  local bucket=$1; shift
     50  jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
     58  jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
     78  while read -r -a bucket; do
     82  if ! ensure_mtu $if ${bucket[0]}; then
     88  if ! bucket_test $iface $neigh $set $nbuckets ${bucket[0]}; then
/linux-6.12.1/Documentation/networking/

D | nexthop-group-resilient.rst
     49  to choose a hash table bucket, then reads the next hop that this bucket
     83  cause bucket allocation change, the wants counts for individual next hops
     91  Each bucket maintains a last-used timer. Every time a packet is forwarded
     92  through a bucket, this timer is updated to current jiffies value. One
     94  amount of time that a bucket must not be hit by traffic in order for it to
    104  upkeep changes the next hop that the bucket references to one of the
    135  - Single-bucket notifications of the type
    143  Some single-bucket notifications are forced, as indicated by the "force"
    145  hop associated with the bucket was removed, and the bucket really must be
    150  bucket should be migrated, but the HW discovers that the bucket has in fact
    [all …]
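Per the text above, a bucket becomes eligible for migration only once its last-used timer shows no traffic for the configured idle time. A sketch of that test; plain monotonic counters stand in for jiffies and the kernel's time_after():

    #include <stdbool.h>
    #include <stdint.h>

    /* A bucket may be moved to a new next hop only after going unused
     * for at least @idle_timer ticks. Unsigned subtraction keeps the
     * comparison correct across counter wraparound. */
    static bool bucket_migratable(uint64_t now, uint64_t last_used,
                                  uint64_t idle_timer)
    {
        return now - last_used >= idle_timer;
    }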
/linux-6.12.1/include/trace/events/

D | bcache.h
     68  __field(size_t, bucket )
     72  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
     75  TP_printk("bucket %zu", __entry->bucket)
    267  __field(size_t, bucket )
    273  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    279  __entry->bucket, __entry->block, __entry->keys)
    370  __field(size_t, bucket )
    375  __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
    379  TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
    429  TP_PROTO(struct cache *ca, size_t bucket),
    [all …]