Lines Matching full:bucket
3 * Primary bucket allocation code
9 * Each bucket has an associated 8-bit gen; this gen corresponds to the gen in
12 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
13 * bucket simply by incrementing its gen.
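The gen-based reuse described above can be modelled with a small user-space sketch; struct bucket, struct bkey_ptr and the helpers here are simplified stand-ins assumed for illustration, not the bcache definitions:

#include <stdint.h>
#include <stdbool.h>

/* Simplified model: a pointer into the cache carries a copy of the
 * bucket's gen, and is only valid while the two still match. */
struct bucket   { uint8_t gen; };
struct bkey_ptr { struct bucket *b; uint8_t gen; };

static bool ptr_stale(const struct bkey_ptr *p)
{
	return p->gen != p->b->gen;
}

/* Reusing a clean bucket is just a gen bump: every existing pointer to it
 * becomes stale at once, without being individually tracked down. */
static void reuse_bucket(struct bucket *b)
{
	b->gen++;
}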
19 * When we invalidate a bucket, we have to write its new gen to disk and wait
33 * If we've got discards enabled, that happens when a bucket moves from the
46 * a bucket is in danger of wrapping around we simply skip invalidating it that
50 * bch_bucket_alloc() allocates a single bucket from a specific cache.
52 * bch_bucket_alloc_set() allocates one bucket from different caches
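A rough sketch of the single-bucket path under that split, assuming a trivial per-reserve freelist in place of ca->free[]; the names here are illustrative, not the kernel prototypes:

#include <stdbool.h>

/* Illustrative only: a tiny LIFO freelist standing in for the per-reserve
 * fifos the allocator keeps (ca->free[RESERVE_*] in the real code). */
struct freelist { long slot[16]; unsigned nr; };

static bool freelist_pop(struct freelist *fl, long *bucket)
{
	if (!fl->nr)
		return false;		/* reserve exhausted */
	*bucket = fl->slot[--fl->nr];
	return true;
}

/* Single-bucket allocation in miniature: take one entry off the chosen
 * reserve or fail; the real bch_bucket_alloc() can also sleep until the
 * allocator thread has refilled the reserve. */
static long alloc_one_bucket(struct freelist *reserve)
{
	long b;

	return freelist_pop(reserve, &b) ? b : -1;
}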
74 /* Bucket heap / gen */
76 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) in bch_inc_gen()
89 struct bucket *b; in bch_rescale_priorities()
125 static inline bool can_inc_bucket_gen(struct bucket *b) in can_inc_bucket_gen()
130 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket()
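The gen-wraparound guard that bch_can_invalidate_bucket() relies on can be sketched like this; GC_GEN_MAX and the field names are assumptions for illustration, not the kernel's constants:

#include <stdint.h>
#include <stdbool.h>

#define GC_GEN_MAX	96	/* illustrative threshold, not the kernel constant */

struct bucket_g { uint8_t gen, last_gc; };

/* How far has the gen advanced since garbage collection last looked at this
 * bucket?  Unsigned 8-bit subtraction handles wraparound correctly. */
static inline uint8_t gc_gen(const struct bucket_g *b)
{
	return (uint8_t)(b->gen - b->last_gc);
}

/* Refuse to invalidate (bump the gen of) a bucket whose gen is in danger of
 * lapping a stale pointer; it is simply skipped until gc catches up. */
static inline bool can_bump_gen(const struct bucket_g *b)
{
	return gc_gen(b) < GC_GEN_MAX;
}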
137 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket()
151 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket()
161 * bucket, and in order for that multiply to make sense we have to scale bucket
163 * Thus, we scale the bucket priorities so that the bucket with the smallest
167 static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b) in new_bucket_prio()
176 struct bucket **lhs = (struct bucket **)l; in new_bucket_max_cmp()
177 struct bucket **rhs = (struct bucket **)r; in new_bucket_max_cmp()
185 struct bucket **lhs = (struct bucket **)l; in new_bucket_min_cmp()
186 struct bucket **rhs = (struct bucket **)r; in new_bucket_min_cmp()
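A sketch of the scaled-priority key and the comparator pair it feeds, assuming a plain qsort()-style interface rather than the kernel heap; INITIAL_PRIO and the struct fields are stand-ins:

#include <stdlib.h>

#define INITIAL_PRIO	32768U	/* illustrative starting priority */

struct hbucket { unsigned prio, sectors_used; };

static unsigned min_prio;	/* smallest prio currently on any bucket */

/* Scale prio so the coldest bucket is still worth 1/8th of INITIAL_PRIO,
 * then weight by live data: a cold, nearly empty bucket gets the smallest
 * key and is reused first. */
static unsigned bucket_key(const struct hbucket *b)
{
	unsigned floor = (INITIAL_PRIO - min_prio) / 8;

	return (b->prio - min_prio + floor) * b->sectors_used;
}

/* Comparators over an array of bucket pointers, usable with qsort(): the
 * "max" ordering puts the largest keys first, and its negation pops the
 * best (smallest-key) candidates for invalidation. */
static int bucket_max_cmp(const void *l, const void *r)
{
	const struct hbucket *a = *(const struct hbucket *const *)l;
	const struct hbucket *b = *(const struct hbucket *const *)r;

	return bucket_key(b) > bucket_key(a) ?  1 :
	       bucket_key(b) < bucket_key(a) ? -1 : 0;
}

static int bucket_min_cmp(const void *l, const void *r)
{
	return -bucket_max_cmp(l, r);
}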
194 struct bucket *b; in invalidate_buckets_lru()
239 struct bucket *b; in invalidate_buckets_fifo()
262 struct bucket *b; in invalidate_buckets_random()
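Of the three invalidation policies, FIFO is the simplest to sketch: sweep the bucket array round-robin and invalidate whatever is eligible. The helpers below are hypothetical stand-ins for bch_can_invalidate_bucket() and bch_invalidate_one_bucket():

#include <stdbool.h>
#include <stddef.h>

struct sbucket { unsigned char gen; bool dirty; };

static bool can_invalidate(const struct sbucket *b) { return !b->dirty; }
static void invalidate(struct sbucket *b)           { b->gen++; }

/* FIFO policy in miniature: sweep the bucket array round-robin from where
 * the previous sweep stopped, invalidating whatever is reusable until the
 * caller has found enough buckets. */
static size_t fifo_cursor;

static void invalidate_buckets_fifo(struct sbucket *buckets, size_t nbuckets,
				    size_t want)
{
	size_t found = 0, scanned = 0;

	while (found < want && scanned++ < nbuckets) {
		struct sbucket *b = &buckets[fifo_cursor++ % nbuckets];

		if (can_invalidate(b)) {
			invalidate(b);
			found++;
		}
	}
}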
323 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
328 if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) in bch_allocator_push()
332 if (fifo_push(&ca->free[i], bucket)) in bch_allocator_push()
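A minimal model of that push order, assuming small array-backed fifos and an illustrative RESERVE_NR; the prio reserve is topped up before anything else:

#include <stdbool.h>

#define RESERVE_PRIO	0
#define RESERVE_NR	4	/* illustrative reserve count */

struct fifo { long slot[8]; unsigned nr; };

static bool fifo_push(struct fifo *f, long v)
{
	if (f->nr == sizeof(f->slot) / sizeof(f->slot[0]))
		return false;
	f->slot[f->nr++] = v;
	return true;
}

/* Hand a freshly invalidated bucket to whichever reserve needs it most:
 * the prio-write reserve first, then the remaining reserves in order;
 * returns false only when every reserve is already full. */
static bool allocator_push(struct fifo reserves[RESERVE_NR], long bucket)
{
	unsigned i;

	if (fifo_push(&reserves[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (i != RESERVE_PRIO && fifo_push(&reserves[i], bucket))
			return true;

	return false;
}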
347 * possibly issue discards to them, then we add the bucket to in bch_allocator_thread()
351 long bucket; in bch_allocator_thread() local
353 if (!fifo_pop(&ca->free_inc, bucket)) in bch_allocator_thread()
359 bucket_to_sector(ca->set, bucket), in bch_allocator_thread()
364 allocator_wait(ca, bch_allocator_push(ca, bucket)); in bch_allocator_thread()
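Putting those pieces together, the thread's inner loop looks roughly like the sketch below; the helpers are user-space stand-ins, and the real code sleeps via allocator_wait() instead of spinning and issues a real discard to the device:

#include <stdbool.h>
#include <stdio.h>

struct lfifo { long slot[8]; unsigned nr; };

static bool lfifo_pop(struct lfifo *f, long *v)
{
	if (!f->nr)
		return false;
	*v = f->slot[--f->nr];
	return true;
}

/* Stand-ins for the device discard and for bch_allocator_push(). */
static void discard_bucket(long b)   { printf("discard bucket %ld\n", b); }
static bool push_to_reserve(long b)  { (void)b; return true; }

/* One pass of the allocator thread: every bucket popped off free_inc has
 * already had its new gen written out, so it is safe to (optionally)
 * discard it and then hand it to the reserves that bch_bucket_alloc()
 * allocates from. */
static void allocator_pass(struct lfifo *free_inc, bool discard_enabled)
{
	long bucket;

	while (lfifo_pop(free_inc, &bucket)) {
		if (discard_enabled)
			discard_bucket(bucket);

		while (!push_to_reserve(bucket))
			;	/* allocator_wait() in the real code */
	}
}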
415 struct bucket *b; in bch_bucket_alloc()
489 void __bch_bucket_free(struct cache *ca, struct bucket *b) in __bch_bucket_free()
559 * only volume write streams from cached devices, secondly we look for a bucket
561 * failing that we look for a bucket that was last used by the same task.
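The stream matching those comment lines describe, reduced to a small standalone sketch; struct open_bkt and pick_bucket() are invented names, and the ordering of the checks is the point:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified open bucket: what kind of device last wrote to it, where the
 * last write ended, and which task (write point) issued it. */
struct open_bkt {
	bool	 flash_only;
	uint64_t next_offset;
	unsigned last_task;
};

/* Pick an open bucket for a new write: same device class AND sequential
 * with the previous write wins outright; otherwise remember a bucket last
 * used by the same task as a fallback; NULL tells the caller to open
 * (allocate) a fresh bucket instead. */
static struct open_bkt *pick_bucket(struct open_bkt *obs, size_t n,
				    bool flash_only, uint64_t offset,
				    unsigned task)
{
	struct open_bkt *same_task = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		struct open_bkt *b = &obs[i];

		if (b->flash_only != flash_only)
			continue;		/* keep the streams apart */
		if (b->next_offset == offset)
			return b;		/* sequential continuation */
		if (b->last_task == task)
			same_task = b;
	}

	return same_task;
}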
634 * We might have to allocate a new bucket, which we can't do with a in bch_alloc_sectors()
637 * allocated bucket(s). in bch_alloc_sectors()
658 * second time we call pick_data_bucket(). If we allocated a bucket but in bch_alloc_sectors()
679 * Move b to the end of the lru, and keep track of what this bucket was in bch_alloc_sectors()
700 * into the btree, but if we're done with this bucket we just transfer in bch_alloc_sectors()
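The drop-the-lock-and-retry dance those comments describe, as a hedged user-space sketch: a pthread mutex stands in for the data bucket lock, and the two helpers are trivial stand-ins for picking an open bucket versus allocating a new one:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct data_bucket { int dummy; };

static struct data_bucket the_bucket;
static struct data_bucket *open_bucket;	/* NULL until one is allocated */

static pthread_mutex_t data_bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins: find a matching open bucket (lock held), or allocate a brand
 * new one (may sleep, so the lock must not be held). */
static struct data_bucket *find_open_bucket(void) { return open_bucket; }
static bool alloc_new_bucket(void) { open_bucket = &the_bucket; return true; }

/* The retry pattern in miniature: picking an open bucket needs the lock,
 * but allocating a fresh one can sleep, so the lock is dropped around the
 * allocation and the pick is re-run afterwards, because the set of open
 * buckets may have changed while the lock was released. */
static struct data_bucket *get_data_bucket(void)
{
	struct data_bucket *b;

	pthread_mutex_lock(&data_bucket_lock);
	while (!(b = find_open_bucket())) {
		pthread_mutex_unlock(&data_bucket_lock);
		if (!alloc_new_bucket())
			return NULL;		/* out of buckets */
		pthread_mutex_lock(&data_bucket_lock);
	}
	pthread_mutex_unlock(&data_bucket_lock);	/* real code keeps working under it */
	return b;
}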