
Searched full:bucket (Results 1 – 25 of 431) sorted by relevance


/linux-6.15/drivers/md/dm-vdo/
int-map.c
16 * probing, all the entries that hash to a given bucket are stored within a fixed neighborhood
17 * starting at that bucket. Chaining is effectively represented as a bit vector relative to each
18 * bucket instead of as pointers or explicit offsets.
20 * When an empty bucket cannot be found within a given neighborhood, subsequent neighborhoods are
22 * an empty bucket will move into the desired neighborhood, allowing the entry to be added. When
36 * entries of the hash bucket at the start of the neighborhood, a pair of small offset fields each
41 * in the list is always the bucket closest to the start of the neighborhood.
61 #define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */
66 * struct bucket - hash bucket
73 struct bucket { struct
[all …]
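
A minimal, self-contained sketch of the neighborhood scheme these comments describe (hypothetical names and sizes; the real int-map.c tracks membership with small offset fields rather than a per-bucket bitmap):

    #include <stdbool.h>
    #include <stdint.h>

    #define TABLE_SIZE   64
    #define NEIGHBORHOOD 16		/* one bit per slot in the hop mask */

    struct slot {
    	uint64_t key;
    	uint64_t value;
    	bool     used;
    };

    static struct slot table[TABLE_SIZE];
    static uint16_t hop[TABLE_SIZE];	/* bit i set: slot home+i holds one of ours */

    static bool lookup(uint64_t key, uint64_t *value)
    {
    	unsigned int home = key % TABLE_SIZE;

    	/* Probe only the slots the home bucket's bit vector claims. */
    	for (unsigned int i = 0; i < NEIGHBORHOOD; i++) {
    		struct slot *s = &table[(home + i) % TABLE_SIZE];

    		if ((hop[home] & (1u << i)) && s->used && s->key == key) {
    			*value = s->value;
    			return true;
    		}
    	}
    	return false;
    }
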
priority-table.c
20 * All the entries with the same priority are queued in a circular list in a bucket for that
23 struct bucket { struct
28 /* The priority of all the entries in this bucket */ argument
34 * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority argument
35 * non-empty bucket by searching a bit vector represented as a single 8-byte word, which is very
44 struct bucket buckets[];
64 struct bucket, __func__, &table); in vdo_make_priority_table()
69 struct bucket *bucket = &table->buckets[priority]; in vdo_make_priority_table() local
71 bucket->priority = priority; in vdo_make_priority_table()
72 INIT_LIST_HEAD(&bucket->queue); in vdo_make_priority_table()
[all …]
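
The dequeue search described above fits in a single find-last-set operation once the bit vector is maintained. A sketch, assuming bit P of a 64-bit word is set exactly when the priority-P bucket is non-empty (__builtin_clzll is a GCC/Clang builtin):

    #include <stdint.h>

    static int highest_nonempty_priority(uint64_t search_vector)
    {
    	if (search_vector == 0)
    		return -1;		/* every bucket is empty */

    	/* Index of the most significant set bit = highest priority. */
    	return 63 - __builtin_clzll(search_vector);
    }
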
/linux-6.15/net/mptcp/
token.c
53 /* called with bucket lock held */
66 /* called with bucket lock held */
111 struct token_bucket *bucket; in mptcp_token_new_request() local
122 bucket = token_bucket(token); in mptcp_token_new_request()
123 spin_lock_bh(&bucket->lock); in mptcp_token_new_request()
124 if (__token_bucket_busy(bucket, token)) { in mptcp_token_new_request()
125 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
129 hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); in mptcp_token_new_request()
130 bucket->chain_len++; in mptcp_token_new_request()
131 spin_unlock_bh(&bucket->lock); in mptcp_token_new_request()
[all …]
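
The insertion pattern above in miniature: hash to one bucket, take only that bucket's lock, reject a busy token, link the node in, and bump the chain length. A userspace sketch with pthreads; the kernel uses spin_lock_bh() and RCU-aware hlists instead:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NBUCKETS 256

    struct node {
    	uint32_t token;
    	struct node *next;
    };

    struct bucket {
    	pthread_mutex_t lock;
    	struct node *chain;
    	unsigned int chain_len;
    };

    static struct bucket htab[NBUCKETS] = {
    	/* GCC range initializer, as kernel code itself uses. */
    	[0 ... NBUCKETS - 1] = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 },
    };

    static bool token_insert(struct node *n)
    {
    	struct bucket *b = &htab[n->token % NBUCKETS];
    	bool busy = false;

    	pthread_mutex_lock(&b->lock);
    	for (struct node *p = b->chain; p; p = p->next)
    		busy |= (p->token == n->token);	/* token already in use? */
    	if (!busy) {
    		n->next = b->chain;		/* add at head of chain */
    		b->chain = n;
    		b->chain_len++;
    	}
    	pthread_mutex_unlock(&b->lock);
    	return !busy;
    }
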
/linux-6.15/net/ceph/crush/
mapper.c
58 * bucket choose methods
60 * For each bucket algorithm, we have a "choose" method that, given a
62 * will produce an item in the bucket.
66 * Choose based on a random permutation of the bucket.
70 * calculate an actual random permutation of the bucket members.
74 static int bucket_perm_choose(const struct crush_bucket *bucket, in bucket_perm_choose() argument
78 unsigned int pr = r % bucket->size; in bucket_perm_choose()
83 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose()
88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % in bucket_perm_choose()
89 bucket->size; in bucket_perm_choose()
[all …]
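
The core of a CRUSH "choose" method as these comments describe it: a deterministic hash of (input x, bucket id, replica r) selects an item, so every client computes the same placement with no shared lookup table. A sketch; mix3() is a stand-in for crush_hash32_3(), not the kernel's hash:

    #include <stdint.h>

    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
    {
    	uint32_t h = a * 0x9e3779b1u;	/* arbitrary integer mixer */

    	h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2);
    	h ^= c + 0xc2b2ae35u + (h << 6) + (h >> 2);
    	return h;
    }

    static int bucket_choose(const int *items, unsigned int size,
    			 int bucket_id, uint32_t x, uint32_t r)
    {
    	/* The same (x, id, r) always yields the same item of the bucket. */
    	return items[mix3(x, (uint32_t)bucket_id, r) % size];
    }
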
/linux-6.15/block/
blk-stat.c
55 int bucket, cpu; in blk_stat_add() local
66 bucket = cb->bucket_fn(rq); in blk_stat_add()
67 if (bucket < 0) in blk_stat_add()
70 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add()
80 unsigned int bucket; in blk_stat_timer_fn() local
83 for (bucket = 0; bucket < cb->buckets; bucket++) in blk_stat_timer_fn()
84 blk_rq_stat_init(&cb->stat[bucket]); in blk_stat_timer_fn()
90 for (bucket = 0; bucket < cb->buckets; bucket++) { in blk_stat_timer_fn()
91 blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]); in blk_stat_timer_fn()
92 blk_rq_stat_init(&cpu_stat[bucket]); in blk_stat_timer_fn()
[all …]
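
The shape of the timer-driven aggregation above: each CPU fills its own bucket array with no cross-CPU contention, and a periodic pass folds the per-CPU buckets into one result and reinitializes them. Plain arrays stand in for per_cpu_ptr() in this sketch:

    #include <stdint.h>

    #define NR_CPUS    4
    #define NR_BUCKETS 8

    struct rq_stat {
    	uint64_t nr_samples;
    	uint64_t total;		/* sum of the sampled values */
    };

    static struct rq_stat cpu_stat[NR_CPUS][NR_BUCKETS];

    static void fold_stats(struct rq_stat out[NR_BUCKETS])
    {
    	for (int b = 0; b < NR_BUCKETS; b++) {
    		out[b] = (struct rq_stat){ 0, 0 };
    		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
    			out[b].nr_samples += cpu_stat[cpu][b].nr_samples;
    			out[b].total      += cpu_stat[cpu][b].total;
    			cpu_stat[cpu][b]   = (struct rq_stat){ 0, 0 };
    		}
    	}
    }
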
/linux-6.15/drivers/md/bcache/
alloc.c
3 * Primary bucket allocation code
9 * Each bucket has an associated 8-bit gen; this gen corresponds to the gen in
12 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
13 * bucket simply by incrementing its gen.
19 * When we invalidate a bucket, we have to write its new gen to disk and wait
33 * If we've got discards enabled, that happens when a bucket moves from the
46 * a bucket is in danger of wrapping around we simply skip invalidating it that
50 * bch_bucket_alloc() allocates a single bucket from a specific cache.
52 * bch_bucket_alloc_set() allocates one bucket from different caches
74 /* Bucket heap / gen */
[all …]
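
A toy version of the reuse rule these comments lay out: invalidating a bucket is just bumping its 8-bit gen, which implicitly invalidates every pointer that recorded the old value. The wrap guard mirrors the "skip invalidating it that time" note; the threshold below is made up for the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_bucket {
    	uint8_t  gen;	/* generation currently written on disk */
    	uint16_t prio;	/* LRU priority, reset when data is written */
    };

    static bool invalidate_bucket(struct toy_bucket *b, uint8_t last_gc_gen)
    {
    	/* Gen in danger of wrapping past the last GC: skip this round. */
    	if ((uint8_t)(b->gen - last_gc_gen) > 96)
    		return false;

    	b->gen++;	/* every pointer holding the old gen is now stale */
    	return true;
    }
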
bcache.h
42 * To do this, we first divide the cache device up into buckets. A bucket is the
46 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
51 * The priority is used to implement an LRU. We reset a bucket's priority when
53 * of each bucket. It could be used to implement something more sophisticated,
58 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
100 * accomplished either by invalidating pointers (by incrementing a bucket's
110 * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111 * free smaller than a bucket - so, that's how big our btree nodes are.
113 * (If buckets are really big we'll only use part of the bucket for a btree node
[all …]
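
Companion sketch for the pointer rule above: a pointer embeds the gen it was created with and is honored only while that matches the bucket's current gen. Hypothetical types, not bcache's bkey layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_ptr {
    	uint32_t bucket_nr;	/* which bucket the pointer points into */
    	uint8_t  gen;		/* the bucket's gen at the time of writing */
    };

    static bool ptr_stale(const uint8_t *bucket_gens, struct toy_ptr p)
    {
    	/* A mismatch means the bucket was reused; drop the pointer. */
    	return bucket_gens[p.bucket_nr] != p.gen;
    }
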
/linux-6.15/drivers/interconnect/qcom/
bcm-voter.c
65 int bucket, i; in bcm_aggregate_mask() local
67 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate_mask()
68 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
69 bcm->vote_y[bucket] = 0; in bcm_aggregate_mask()
74 /* If any vote in this bucket exists, keep the BCM enabled */ in bcm_aggregate_mask()
75 if (node->sum_avg[bucket] || node->max_peak[bucket]) { in bcm_aggregate_mask()
76 bcm->vote_x[bucket] = 0; in bcm_aggregate_mask()
77 bcm->vote_y[bucket] = bcm->enable_mask; in bcm_aggregate_mask()
94 size_t i, bucket; in bcm_aggregate() local
99 for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { in bcm_aggregate()
[all …]
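
A skeleton of the aggregation loop above: clear each bucket's votes, then keep the BCM enabled (vote_y = enable_mask) as soon as any node still requests bandwidth in that bucket. Field names follow the excerpt; the node list is flattened to an array for the sketch:

    #include <stddef.h>
    #include <stdint.h>

    #define NUM_BUCKETS 3

    struct toy_node {
    	uint64_t sum_avg[NUM_BUCKETS];
    	uint64_t max_peak[NUM_BUCKETS];
    };

    struct toy_bcm {
    	uint64_t vote_x[NUM_BUCKETS];
    	uint64_t vote_y[NUM_BUCKETS];
    	uint64_t enable_mask;
    };

    static void aggregate_mask(struct toy_bcm *bcm,
    			   const struct toy_node *nodes, size_t n)
    {
    	for (int b = 0; b < NUM_BUCKETS; b++) {
    		bcm->vote_x[b] = 0;
    		bcm->vote_y[b] = 0;
    		for (size_t i = 0; i < n; i++) {
    			/* Any vote in this bucket keeps the BCM enabled. */
    			if (nodes[i].sum_avg[b] || nodes[i].max_peak[b]) {
    				bcm->vote_y[b] = bcm->enable_mask;
    				break;
    			}
    		}
    	}
    }
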
/linux-6.15/net/sched/
sch_hhf.c
21 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
23 * in which the heavy-hitter bucket is served with less weight.
61 * dispatched to the heavy-hitter bucket accordingly.
68 * bucket.
71 * to the non-heavy-hitter bucket.
74 * send p to the heavy-hitter bucket.
105 WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
106 WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
328 /* Removes one skb from head of bucket. */
329 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument
[all …]
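
The dispatch decision those comments walk through, reduced to its core: once a flow's accumulated bytes cross a threshold it is marked a heavy hitter and from then on queued to the heavy-hitter bucket. Per-flow state is assumed here; the real hhf estimates flow size probabilistically rather than tracking every flow:

    enum { BUCKET_HH = 0, BUCKET_NON_HH = 1 };

    struct flow_state {
    	unsigned long bytes;		/* accumulated flow size */
    	int is_heavy_hitter;
    };

    static int classify(struct flow_state *f, unsigned int pkt_len,
    		    unsigned long hh_threshold)
    {
    	f->bytes += pkt_len;
    	if (f->bytes >= hh_threshold)
    		f->is_heavy_hitter = 1;	/* switched immediately, and stays */
    	return f->is_heavy_hitter ? BUCKET_HH : BUCKET_NON_HH;
    }
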
/linux-6.15/tools/tracing/rtla/src/
timerlat.bpf.c
70 int bucket) in update_main_hist() argument
76 if (bucket >= entries) in update_main_hist()
80 map_increment(map, bucket); in update_main_hist()
85 int bucket) in update_summary() argument
93 if (bucket >= entries) in update_summary()
122 int bucket; in handle_timerlat_sample() local
129 bucket = latency / bucket_size; in handle_timerlat_sample()
132 update_main_hist(&hist_irq, bucket); in handle_timerlat_sample()
133 update_summary(&summary_irq, latency, bucket); in handle_timerlat_sample()
138 update_main_hist(&hist_thread, bucket); in handle_timerlat_sample()
[all …]
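
The binning step above in isolation: a sample lands in bucket latency / bucket_size, with a bound check so outliers cannot index past the histogram. This sketch folds overflows into the last bucket, one common convention; the excerpt's guard shows the BPF code checks the same bound before incrementing:

    static void hist_update(unsigned long *hist, int entries,
    			int bucket_size, unsigned long latency)
    {
    	int bucket = latency / bucket_size;

    	if (bucket >= entries)
    		bucket = entries - 1;	/* fold outliers into last bucket */
    	hist[bucket]++;
    }
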
/linux-6.15/fs/ocfs2/
xattr.c
60 /* The actual buffers that make up the bucket */
63 /* How many blocks make up one bucket for this filesystem */
121 struct ocfs2_xattr_bucket *bucket; member
275 struct ocfs2_xattr_bucket *bucket,
297 struct ocfs2_xattr_bucket *bucket,
318 struct ocfs2_xattr_bucket *bucket; in ocfs2_xattr_bucket_new() local
323 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); in ocfs2_xattr_bucket_new()
324 if (bucket) { in ocfs2_xattr_bucket_new()
325 bucket->bu_inode = inode; in ocfs2_xattr_bucket_new()
326 bucket->bu_blocks = blks; in ocfs2_xattr_bucket_new()
[all …]
/linux-6.15/drivers/infiniband/sw/rdmavt/
trace_qp.h
18 TP_PROTO(struct rvt_qp *qp, u32 bucket),
19 TP_ARGS(qp, bucket),
23 __field(u32, bucket)
28 __entry->bucket = bucket;
31 "[%s] qpn 0x%x bucket %u",
34 __entry->bucket
39 TP_PROTO(struct rvt_qp *qp, u32 bucket),
40 TP_ARGS(qp, bucket));
43 TP_PROTO(struct rvt_qp *qp, u32 bucket),
44 TP_ARGS(qp, bucket));
/linux-6.15/fs/bcachefs/
backpointers.h
36 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
54 …line bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket) in bp_pos_to_bucket_nodev_noerror() argument
59 *bucket = bp_pos_to_bucket(ca, bp_pos); in bp_pos_to_bucket_nodev_noerror()
65 struct bpos bucket, in bucket_pos_to_bp_noerror()
68 return POS(bucket.inode, in bucket_pos_to_bp_noerror()
69 (bucket_to_sector(ca, bucket.offset) << in bucket_pos_to_bp_noerror()
74 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
77 struct bpos bucket, in bucket_pos_to_bp()
80 struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset); in bucket_pos_to_bp()
81 EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret))); in bucket_pos_to_bp()
[all …]
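
A simplified round trip of the conversion above: encoding multiplies the bucket index up to sectors and shifts in a sub-bucket offset, and decoding inverts it; the EBUG_ON in the excerpt is exactly this consistency check. Sizes and field widths below are made up:

    #include <assert.h>
    #include <stdint.h>

    #define BUCKET_SECTORS 128ull	/* hypothetical bucket size */
    #define BP_OFFSET_BITS 20	/* hypothetical sub-bucket field width */

    static uint64_t bucket_to_bp_pos(uint64_t bucket, uint64_t bucket_offset)
    {
    	return (bucket * BUCKET_SECTORS << BP_OFFSET_BITS) + bucket_offset;
    }

    static uint64_t bp_pos_to_bucket(uint64_t bp_pos)
    {
    	return (bp_pos >> BP_OFFSET_BITS) / BUCKET_SECTORS;
    }

    int main(void)
    {
    	uint64_t b = 42;

    	/* Mirrors EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret))). */
    	assert(bp_pos_to_bucket(bucket_to_bp_pos(b, 7)) == b);
    	return 0;
    }
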
/linux-6.15/fs/nfs/
nfs42xattr.c
87 struct nfs4_xattr_bucket *bucket; member
120 * 1. inode i_lock or bucket lock
238 entry->bucket = NULL; in nfs4_xattr_alloc_entry()
388 struct nfs4_xattr_bucket *bucket; in nfs4_xattr_discard_cache() local
394 bucket = &cache->buckets[i]; in nfs4_xattr_discard_cache()
396 spin_lock(&bucket->lock); in nfs4_xattr_discard_cache()
397 bucket->draining = true; in nfs4_xattr_discard_cache()
398 hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { in nfs4_xattr_discard_cache()
403 spin_unlock(&bucket->lock); in nfs4_xattr_discard_cache()
511 nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) in nfs4_xattr_get_entry() argument
[all …]
pnfs_nfs.c
64 pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket) in pnfs_free_bucket_lseg() argument
66 if (list_empty(&bucket->committing) && list_empty(&bucket->written)) { in pnfs_free_bucket_lseg()
67 struct pnfs_layout_segment *freeme = bucket->lseg; in pnfs_free_bucket_lseg()
68 bucket->lseg = NULL; in pnfs_free_bucket_lseg()
75 * If this will make the bucket empty, it will need to put the lseg reference.
82 struct pnfs_commit_bucket *bucket = NULL; in pnfs_generic_clear_request_commit() local
88 bucket = list_first_entry(&req->wb_list, in pnfs_generic_clear_request_commit()
92 if (bucket) in pnfs_generic_clear_request_commit()
93 pnfs_put_lseg(pnfs_free_bucket_lseg(bucket)); in pnfs_generic_clear_request_commit()
239 * @bucket->committing.
[all …]
/linux-6.15/include/linux/crush/
crush.h
99 * A bucket is a named container of other items (either devices or
100 * other buckets). Items within a bucket are chosen using one of a
105 * Bucket Alg Speed Additions Removals
144 * Replacement weights for each item in a bucket. The size of the
145 * array must be exactly the size of the straw2 bucket, just as the
157 * Replacement weights and ids for a given straw2 bucket, for
160 * When crush_do_rule() chooses the Nth item from a straw2 bucket, the
185 * Replacement weights and ids for each bucket in the crushmap. The
190 * an item from the bucket __map->buckets[N]__ bucket, provided it
191 * is a straw2 bucket.
[all …]
/linux-6.15/Documentation/networking/
nexthop-group-resilient.rst
49 to choose a hash table bucket, then reads the next hop that this bucket
83 cause bucket allocation change, the wants counts for individual next hops
91 Each bucket maintains a last-used timer. Every time a packet is forwarded
92 through a bucket, this timer is updated to current jiffies value. One
94 amount of time that a bucket must not be hit by traffic in order for it to
104 upkeep changes the next hop that the bucket references to one of the
135 - Single-bucket notifications of the type
143 Some single-bucket notifications are forced, as indicated by the "force"
145 hop associated with the bucket was removed, and the bucket really must be
150 bucket should be migrated, but the HW discovers that the bucket has in fact
[all …]
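
A sketch of the two mechanisms the text describes: forwarding picks a bucket by hash and refreshes its last-used timestamp, and a bucket only becomes eligible for migration once it has sat idle past the threshold. A plain counter stands in for jiffies:

    #include <stdbool.h>
    #include <stdint.h>

    struct nh_bucket {
    	int      nhid;		/* next hop this bucket points at */
    	uint64_t last_used;	/* "jiffies" of the last forwarded packet */
    };

    static int forward(struct nh_bucket *tbl, unsigned int nbuckets,
    		   uint32_t flow_hash, uint64_t now)
    {
    	struct nh_bucket *b = &tbl[flow_hash % nbuckets];

    	b->last_used = now;	/* traffic keeps the bucket "hot" */
    	return b->nhid;
    }

    static bool bucket_migratable(const struct nh_bucket *b, uint64_t now,
    			      uint64_t idle_timer)
    {
    	return now - b->last_used >= idle_timer;
    }
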
/linux-6.15/Documentation/userspace-api/media/v4l/
metafmt-vsp1-hgt.rst
28 The Saturation position **n** (0 - 31) of the bucket in the matrix is
33 The Hue position **m** (0 - 5) of the bucket in the matrix depends on
101 - :cspan:`4` Histogram bucket (m=0, n=0) [31:0]
103 - :cspan:`4` Histogram bucket (m=0, n=1) [31:0]
107 - :cspan:`4` Histogram bucket (m=0, n=31) [31:0]
109 - :cspan:`4` Histogram bucket (m=1, n=0) [31:0]
113 - :cspan:`4` Histogram bucket (m=2, n=0) [31:0]
117 - :cspan:`4` Histogram bucket (m=3, n=0) [31:0]
121 - :cspan:`4` Histogram bucket (m=4, n=0) [31:0]
125 - :cspan:`4` Histogram bucket (m=5, n=0) [31:0]
[all …]
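
Given the 6x32 layout the table above spells out, the flattened index of a bucket follows directly. A small helper, assuming row-major order as listed:

    static unsigned int hgt_bucket_index(unsigned int m, unsigned int n)
    {
    	/* m = hue row (0-5), n = saturation column (0-31). */
    	return m * 32 + n;	/* 192 32-bit buckets in total */
    }
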
/linux-6.15/net/vmw_vsock/
diag.c
52 unsigned int bucket; in vsock_diag_dump() local
63 bucket = cb->args[1]; in vsock_diag_dump()
72 while (bucket < ARRAY_SIZE(vsock_bind_table)) { in vsock_diag_dump()
73 struct list_head *head = &vsock_bind_table[bucket]; in vsock_diag_dump()
94 bucket++; in vsock_diag_dump()
98 bucket = 0; in vsock_diag_dump()
102 while (bucket < ARRAY_SIZE(vsock_connected_table)) { in vsock_diag_dump()
103 struct list_head *head = &vsock_connected_table[bucket]; in vsock_diag_dump()
128 bucket++; in vsock_diag_dump()
135 cb->args[1] = bucket; in vsock_diag_dump()
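
The resumable-walk pattern above, stripped down: the dump remembers which bucket it stopped in (cb->args in the kernel) so the next call resumes there instead of rescanning the whole table. The emit_bucket() stub stands in for filling a netlink buffer:

    #include <stdbool.h>

    #define NR_TABLE_BUCKETS 32

    struct dump_state {
    	unsigned int bucket;	/* bucket to resume from */
    };

    static bool emit_bucket(unsigned int bucket)
    {
    	/* Stub: pretend the output buffer fills partway through. */
    	return bucket != 13;
    }

    static void dump_table(struct dump_state *st)
    {
    	unsigned int bucket = st->bucket;

    	while (bucket < NR_TABLE_BUCKETS) {
    		if (!emit_bucket(bucket))
    			break;		/* resume from this bucket later */
    		bucket++;
    	}
    	st->bucket = bucket;	/* saved for the next invocation */
    }
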
/linux-6.15/fs/afs/
dir_search.c
25 int bucket; in afs_dir_hash_name() local
29 bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1); in afs_dir_hash_name()
31 bucket = AFS_DIR_HASHTBL_SIZE - bucket; in afs_dir_hash_name()
32 bucket &= (AFS_DIR_HASHTBL_SIZE - 1); in afs_dir_hash_name()
34 return bucket; in afs_dir_hash_name()
60 iter->bucket = afs_dir_hash_name(name); in afs_dir_init_iter()
121 * Search through a directory bucket.
134 entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]); in afs_dir_search_bucket()
135 _enter("%x,%x", iter->bucket, entry); in afs_dir_search_bucket()
148 iter->bucket, resv, slot, slot + iter->nr_slots - 1); in afs_dir_search_bucket()
[all …]
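
The fold at the end of the hash above: mask down to the table size, then mirror negative hash values back into range, as the excerpt's three lines do. The mixing step itself is condensed and only assumed here:

    #define DIR_HASHTBL_SIZE 128	/* must be a power of two */

    static int dir_hash_bucket(const char *name)
    {
    	unsigned int uhash = 0;

    	while (*name)
    		uhash = uhash * 173 + (unsigned char)*name++;

    	int hash = (int)uhash;	/* the directory hash is a signed value */
    	int bucket = hash & (DIR_HASHTBL_SIZE - 1);

    	if (hash < 0) {
    		bucket = DIR_HASHTBL_SIZE - bucket;
    		bucket &= (DIR_HASHTBL_SIZE - 1);
    	}
    	return bucket;
    }
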
/linux-6.15/include/trace/events/
bcache.h
68 __field(size_t, bucket )
72 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
75 TP_printk("bucket %zu", __entry->bucket)
267 __field(size_t, bucket )
273 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
278 TP_printk("bucket %zu written block %u + %u",
279 __entry->bucket, __entry->block, __entry->keys)
370 __field(size_t, bucket )
375 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
379 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
[all …]
/linux-6.15/kernel/dma/
debug.c
246 * Request exclusive access to a hash bucket for a given dma_debug_entry.
261 * Give up exclusive access to the hash bucket
263 static void put_hash_bucket(struct hash_bucket *bucket, in put_hash_bucket() argument
265 __releases(&bucket->lock) in put_hash_bucket()
267 spin_unlock_irqrestore(&bucket->lock, flags); in put_hash_bucket()
290 * Search a given entry in the hash bucket list
292 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument
299 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
342 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument
345 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact()
[all …]
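
The lookup protocol above has a simple shape: hash the address to a bucket, take that bucket's lock in the get path, scan the chain for an exact match, and release through the matching put path. A pthreads sketch; the kernel uses spin_lock_irqsave()/spin_unlock_irqrestore():

    #include <pthread.h>
    #include <stddef.h>

    struct debug_entry {
    	void *addr;
    	struct debug_entry *next;
    };

    struct hash_bucket {
    	pthread_mutex_t lock;
    	struct debug_entry *list;
    };

    static struct hash_bucket *get_hash_bucket(struct hash_bucket *tab,
    					   size_t nbuckets, void *addr)
    {
    	struct hash_bucket *b = &tab[(size_t)addr / 64 % nbuckets];

    	pthread_mutex_lock(&b->lock);	/* caller now owns the bucket */
    	return b;
    }

    static struct debug_entry *bucket_find_exact(struct hash_bucket *b,
    					     void *addr)
    {
    	for (struct debug_entry *e = b->list; e; e = e->next)
    		if (e->addr == addr)
    			return e;
    	return NULL;
    }

    static void put_hash_bucket(struct hash_bucket *b)
    {
    	pthread_mutex_unlock(&b->lock);	/* matching release path */
    }
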
/linux-6.15/include/linux/
rhashtable.h
36 * the hash bucket. This allows us to be sure we've found the end
38 * The value stored in the hash bucket has BIT(0) used as a lock bit.
42 * pointer stored in the bucket. This struct needs to be defined so
68 * @rehash: Current bucket being rehashed
308 * We lock a bucket by setting BIT(0) in the pointer - this is always
309 * zero in real pointers. The NULLS mark is never stored in the bucket,
310 * rather we store NULL if the bucket is empty.
312 * of the hashtable design is to achieve minimum per-bucket contention.
313 * A nested hash table might not have a bucket pointer. In that case
314 * we cannot get a lock. For remove and replace the bucket cannot be
[all …]
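
The pointer-tagging trick those comments describe, in miniature: node pointers are word-aligned, so BIT(0) is free to act as the per-bucket lock bit, and the real head pointer is recovered by masking it off. Atomics and the NULLS marker are elided; this only shows the bit layout:

    #include <stdbool.h>
    #include <stdint.h>

    #define BUCKET_LOCK_BIT 1ul

    static bool bucket_is_locked(uintptr_t bucket)
    {
    	return bucket & BUCKET_LOCK_BIT;
    }

    static uintptr_t bucket_lock(uintptr_t bucket)
    {
    	return bucket | BUCKET_LOCK_BIT;	/* set BIT(0) */
    }

    static void *bucket_ptr(uintptr_t bucket)
    {
    	/* Mask the lock bit off to recover the real head pointer. */
    	return (void *)(bucket & ~BUCKET_LOCK_BIT);
    }
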
/linux-6.15/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
pno.h
56 * brcmf_pno_find_reqid_by_bucket - find request id for given bucket index.
59 * @bucket: index of firmware bucket.
61 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
64 * brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
67 * @netinfo: netinfo to compare with bucket configuration.
/linux-6.15/tools/testing/selftests/drivers/net/hw/
ethtool_rmon.sh
35 local bucket=$1; shift
50 jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
58 jq -r ".[0].rmon[\"${set}-pktsNtoM\"][$bucket].val")
78 while read -r -a bucket; do
79 step="$set-pkts${bucket[0]}to${bucket[1]} on $iface"
82 if ! ensure_mtu $if ${bucket[0]}; then
88 if ! bucket_test $iface $neigh $set $nbuckets ${bucket[0]}; then
