Lines matching the identifier "bc" (full matches) in fs/bcachefs/btree_key_cache.c, the bcachefs btree key cache. Matches are grouped by the function they occur in, with the original source line number kept in front of each match; "function argument" or "local variable" notes how bc is bound in that function.

In bkey_cached_free() (bc is a function argument):
   82	static void bkey_cached_free(struct btree_key_cache *bc,
   85		struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
   93		list_move_tail(&ck->list, &bc->freed_pcpu);
   94		bc->nr_freed_pcpu++;
   96		list_move_tail(&ck->list, &bc->freed_nonpcpu);
   97		bc->nr_freed_nonpcpu++;
   99		atomic_long_inc(&bc->nr_freed);

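Read together, the bkey_cached_free() matches are the slow-path free: the entry is parked on one of two freelists, chosen by whether its lock keeps percpu reader counts, and bc->nr_freed tracks the total across both. A minimal sketch of how the listed lines plausibly fit together; the branch condition and the SRCU sequence are reconstructions from context, not part of the matches:

/* Sketch only: everything not listed above is an assumption. */
static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	/* Assumed: remember an SRCU cookie so the entry is not reused
	 * until in-flight btree transactions have drained. */
	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	if (ck->c.lock.readers) {		/* assumed branch condition */
		list_move_tail(&ck->list, &bc->freed_pcpu);
		bc->nr_freed_pcpu++;
	} else {
		list_move_tail(&ck->list, &bc->freed_nonpcpu);
		bc->nr_freed_nonpcpu++;
	}
	atomic_long_inc(&bc->nr_freed);
}
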
In __bkey_cached_move_to_freelist_ordered() (bc is a function argument):
  110	static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
  115		bc->nr_freed_nonpcpu++;
  117		list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
  125		list_move(&ck->list, &bc->freed_nonpcpu);

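These matches outline an ordered insert: walk freed_nonpcpu from the tail and splice the entry in so the list stays sorted, which lets the shrinker stop reaping at the first entry whose grace period has not elapsed. A sketch under the assumption that the sort key is the SRCU barrier sequence; the comparison is a reconstruction:

static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
						   struct bkey_cached *ck)
{
	struct bkey_cached *pos;

	bc->nr_freed_nonpcpu++;

	list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
		/* Assumed sort key: SRCU grace-period sequence. */
		if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
				 pos->btree_trans_barrier_seq)) {
			list_move(&ck->list, &pos->list);
			return;
		}
	}

	/* Nothing older found: the entry goes to the head of the list. */
	list_move(&ck->list, &bc->freed_nonpcpu);
}
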
In bkey_cached_move_to_freelist() (bc is a function argument):
  129	static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
  140		f = this_cpu_ptr(bc->pcpu_freed);
  149		mutex_lock(&bc->lock);
  151		f = this_cpu_ptr(bc->pcpu_freed);
  156		__bkey_cached_move_to_freelist_ordered(bc, ck2);
  160		__bkey_cached_move_to_freelist_ordered(bc, ck);
  161		mutex_unlock(&bc->lock);
  164		mutex_lock(&bc->lock);
  165		list_move_tail(&ck->list, &bc->freed_nonpcpu);
  166		bc->nr_freed_nonpcpu++;
  167		mutex_unlock(&bc->lock);
  170		mutex_lock(&bc->lock);
  171		list_move_tail(&ck->list, &bc->freed_pcpu);
  172		mutex_unlock(&bc->lock);

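bkey_cached_move_to_freelist() is the write side of a percpu-cache-with-global-overflow scheme: try to stash the entry in this CPU's array without taking bc->lock; if the array is full, take the lock, spill half the array to the shared ordered list, and add the entry there. The matches at 164-167 look like a fallback branch that appends directly under the lock (plausibly a non-kernel build of the same file), and 170-172 are the percpu-reader case. A sketch, assuming the percpu freelist is an array f->objs[] with a count f->nr (the field names are guesses; struct btree_key_cache_freelist itself is named in the match at line 1023):

static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
					 struct bkey_cached *ck)
{
	if (!ck->c.lock.readers) {		/* assumed predicate */
		struct btree_key_cache_freelist *f;
		bool freed = false;

		/* Fast path: lockless stash into this CPU's array. */
		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);
		if (f->nr < ARRAY_SIZE(f->objs)) {
			f->objs[f->nr++] = ck;
			freed = true;
		}
		preempt_enable();

		if (!freed) {
			/* Overflow: spill half the array, ordered, under the lock. */
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);
			while (f->nr > ARRAY_SIZE(f->objs) / 2) {
				struct bkey_cached *ck2 = f->objs[--f->nr];

				__bkey_cached_move_to_freelist_ordered(bc, ck2);
			}
			preempt_enable();

			__bkey_cached_move_to_freelist_ordered(bc, ck);
			mutex_unlock(&bc->lock);
		}
	} else {
		/* Percpu-reader entries always go to their own list. */
		mutex_lock(&bc->lock);
		list_move_tail(&ck->list, &bc->freed_pcpu);
		mutex_unlock(&bc->lock);
	}
}
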
In bkey_cached_free_fast() (bc is a function argument):
  176	static void bkey_cached_free_fast(struct btree_key_cache *bc,
  179		struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
  185		atomic_long_inc(&bc->nr_freed);
  191		bkey_cached_move_to_freelist(bc, ck);

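bkey_cached_free_fast() is the fast-path counterpart: bump the global freed counter, then hand the entry to bkey_cached_move_to_freelist(). A short sketch; the list_del_init() and the SRCU line are assumptions mirroring the slow path:

static void bkey_cached_free_fast(struct btree_key_cache *bc,
				  struct bkey_cached *ck)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

	/* Assumed, mirroring bkey_cached_free() above: */
	ck->btree_trans_barrier_seq =
		start_poll_synchronize_srcu(&c->btree_trans_barrier);

	list_del_init(&ck->list);		/* assumed bookkeeping */
	atomic_long_inc(&bc->nr_freed);

	bkey_cached_move_to_freelist(bc, ck);
}
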
In bkey_cached_alloc() (bc is a local variable):
  202		struct btree_key_cache *bc = &c->btree_key_cache;
  212		f = this_cpu_ptr(bc->pcpu_freed);
  218		mutex_lock(&bc->lock);
  220		f = this_cpu_ptr(bc->pcpu_freed);
  222		while (!list_empty(&bc->freed_nonpcpu) &&
  224		ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
  226		bc->nr_freed_nonpcpu--;
  232		mutex_unlock(&bc->lock);
  235		mutex_lock(&bc->lock);
  236		if (!list_empty(&bc->freed_nonpcpu)) {
  237		ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
  239		bc->nr_freed_nonpcpu--;
  241		mutex_unlock(&bc->lock);
  244		mutex_lock(&bc->lock);
  245		if (!list_empty(&bc->freed_pcpu)) {
  246		ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
  249		mutex_unlock(&bc->lock);
  255		bkey_cached_move_to_freelist(bc, ck);
  266		bkey_cached_move_to_freelist(bc, ck);

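The bkey_cached_alloc() matches are the read side of the same scheme: pop from this CPU's array; if it is empty, take bc->lock and refill the array from the tail of freed_nonpcpu (decrementing nr_freed_nonpcpu, per lines 226 and 239); percpu-reader entries come only from freed_pcpu. The matches at 255 and 266 show the entry being pushed back via bkey_cached_move_to_freelist() when a later step, presumably locking the reclaimed entry, fails. A sketch of just the freelist part; the helper name bkey_cached_try_freelists and the array layout are hypothetical:

/* Hypothetical helper isolating the freelist logic of bkey_cached_alloc(). */
static struct bkey_cached *bkey_cached_try_freelists(struct bch_fs *c,
						     bool pcpu_readers)
{
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = NULL;

	if (!pcpu_readers) {
		struct btree_key_cache_freelist *f;

		/* Fast path: pop from this CPU's array. */
		preempt_disable();
		f = this_cpu_ptr(bc->pcpu_freed);
		if (f->nr)
			ck = f->objs[--f->nr];
		preempt_enable();

		if (!ck) {
			/* Refill the array from the shared list's tail. */
			mutex_lock(&bc->lock);
			preempt_disable();
			f = this_cpu_ptr(bc->pcpu_freed);
			while (!list_empty(&bc->freed_nonpcpu) &&
			       f->nr < ARRAY_SIZE(f->objs) / 2) {
				ck = list_last_entry(&bc->freed_nonpcpu,
						     struct bkey_cached, list);
				list_del_init(&ck->list);
				bc->nr_freed_nonpcpu--;
				f->objs[f->nr++] = ck;
			}
			ck = f->nr ? f->objs[--f->nr] : NULL;
			preempt_enable();
			mutex_unlock(&bc->lock);
		}
	} else {
		mutex_lock(&bc->lock);
		if (!list_empty(&bc->freed_pcpu)) {
			ck = list_last_entry(&bc->freed_pcpu,
					     struct bkey_cached, list);
			list_del_init(&ck->list);
		}
		mutex_unlock(&bc->lock);
	}
	return ck;
}
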
In btree_key_cache_create() (bc is a local variable):
  323		struct btree_key_cache *bc = &c->btree_key_cache;
  332		ck = bkey_cached_reuse(bc);
  349		if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
  359		bkey_cached_free_fast(bc, ck);
  366		atomic_long_inc(&bc->nr_keys);

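In btree_key_cache_create(), the matches show the publish step: a recycled entry from bkey_cached_reuse() is inserted into the rhashtable, a losing racer is recycled straight back via bkey_cached_free_fast(), and nr_keys counts live entries. A condensed sketch; the helper name, the &ck->hash linkage, and the error code are assumptions (bch2_btree_key_cache_params is taken from the init match at line 1028):

/* Hypothetical helper condensing the listed calls. */
static int btree_key_cache_publish(struct btree_key_cache *bc,
				   struct bkey_cached *ck)
{
	if (unlikely(rhashtable_lookup_insert_fast(&bc->table, &ck->hash,
					bch2_btree_key_cache_params))) {
		/* Lost a race to insert the same key: recycle our copy. */
		bkey_cached_free_fast(bc, ck);
		return -EEXIST;		/* error code chosen for the sketch */
	}

	atomic_long_inc(&bc->nr_keys);
	return 0;
}
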
In bch2_btree_key_cache_scan() (bc is a local variable):
  826		struct btree_key_cache *bc = &c->btree_key_cache;
  833		mutex_lock(&bc->lock);
  841		scanned += bc->nr_freed_nonpcpu;
  843		list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
  851		atomic_long_dec(&bc->nr_freed);
  853		bc->nr_freed_nonpcpu--;
  859		scanned += bc->nr_freed_pcpu;
  861		list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
  869		atomic_long_dec(&bc->nr_freed);
  871		bc->nr_freed_pcpu--;
  878		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
  879		if (bc->shrink_iter >= tbl->size)
  880			bc->shrink_iter = 0;
  881		start = bc->shrink_iter;
  886		pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
  889		next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
  898		bkey_cached_evict(bc, ck);
  899		bkey_cached_free(bc, ck);
  909		bc->shrink_iter++;
  910		if (bc->shrink_iter >= tbl->size)
  911			bc->shrink_iter = 0;
  912		} while (scanned < nr && bc->shrink_iter != start);
  918		mutex_unlock(&bc->lock);

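The bch2_btree_key_cache_scan() matches sketch a two-phase shrinker: first reap already-freed entries from both freelists once their SRCU grace period has elapsed (the ordered insert above is what makes the early break valid), then walk the rhashtable buckets round-robin from bc->shrink_iter, evicting entries onto the freelists, until nr objects have been scanned or the walk wraps back to where it started. A condensed sketch; the SRCU poll, the dirty-skip test, and the slab cache name are assumptions:

static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;	/* assumed linkage */
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck, *t;
	unsigned long scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned start;

	mutex_lock(&bc->lock);

	/* Phase 1: reap freed entries whose grace period has elapsed.
	 * The list is ordered by barrier sequence, so stop at the first miss. */
	scanned += bc->nr_freed_nonpcpu;
	list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
		if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
						 ck->btree_trans_barrier_seq))
			break;
		list_del(&ck->list);
		kmem_cache_free(bch2_key_cache, ck);	/* assumed cache */
		atomic_long_dec(&bc->nr_freed);
		bc->nr_freed_nonpcpu--;
		freed++;
	}
	/* The matches show an identical loop over bc->freed_pcpu,
	 * decrementing bc->nr_freed_pcpu; elided here. */

	/* Phase 2: round-robin bucket walk, resuming at bc->shrink_iter. */
	rcu_read_lock();
	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
	if (bc->shrink_iter >= tbl->size)
		bc->shrink_iter = 0;
	start = bc->shrink_iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl,
							  bc->shrink_iter);
			ck = container_of(pos, struct bkey_cached, hash);

			/* Assumed: dirty or locked entries are skipped. */
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
				bkey_cached_evict(bc, ck);
				bkey_cached_free(bc, ck);
				freed++;
			}
			scanned++;
			pos = next;
		}

		bc->shrink_iter++;
		if (bc->shrink_iter >= tbl->size)
			bc->shrink_iter = 0;
	} while (scanned < nr && bc->shrink_iter != start);
	rcu_read_unlock();

	mutex_unlock(&bc->lock);
	return freed;
}
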
In bch2_btree_key_cache_count() (bc is a local variable):
  927		struct btree_key_cache *bc = &c->btree_key_cache;
  928		long nr = atomic_long_read(&bc->nr_keys) -
  929			atomic_long_read(&bc->nr_dirty);

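The count callback reports only clean entries as reclaimable, which is exactly what the two matched lines compute. A sketch, assuming the current shrinker API that the shrinker_free() match at line 946 implies:

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;	/* assumed linkage */
	struct btree_key_cache *bc = &c->btree_key_cache;
	long nr = atomic_long_read(&bc->nr_keys) -
		  atomic_long_read(&bc->nr_dirty);

	/* Dirty entries must be flushed before they can be shrunk. */
	return max(0L, nr);
}
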
In bch2_fs_btree_key_cache_exit() (bc is a function argument):
  934	void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
  936		struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
  946		shrinker_free(bc->shrink);
  948		mutex_lock(&bc->lock);
  953		while (atomic_long_read(&bc->nr_keys)) {
  955		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
  959		bkey_cached_evict(bc, ck);
  968		per_cpu_ptr(bc->pcpu_freed, cpu);
  977		BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
  978		BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
  980		list_splice(&bc->freed_pcpu, &items);
  981		list_splice(&bc->freed_nonpcpu, &items);
  983		mutex_unlock(&bc->lock);
  994		if (atomic_long_read(&bc->nr_dirty) &&
  998			atomic_long_read(&bc->nr_dirty));
 1000		if (atomic_long_read(&bc->nr_keys))
 1002			atomic_long_read(&bc->nr_keys));
 1004		if (bc->table_init_done)
 1005			rhashtable_destroy(&bc->table);
 1007		free_percpu(bc->pcpu_freed);

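Teardown evicts every remaining hashed entry, drains the percpu arrays and both freelists onto one local list (sanity-checking the freelist counters against list_count_nodes() first), frees everything, and only destroys the table if init got far enough to create it. A condensed sketch; dirty-key flushing, per-entry cleanup, the warning text, and the slab cache name are assumptions:

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bkey_cached *ck, *n;
	LIST_HEAD(items);
	int cpu;

	shrinker_free(bc->shrink);

	mutex_lock(&bc->lock);

	/* Evict every hashed entry onto a private list. */
	rcu_read_lock();
	while (atomic_long_read(&bc->nr_keys)) {
		struct bucket_table *tbl =
			rht_dereference_rcu(bc->table.tbl, &bc->table);
		struct rhash_head *pos;
		unsigned i;

		for (i = 0; i < tbl->size; i++)
			rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
				bkey_cached_evict(bc, ck);
				list_add(&ck->list, &items);
			}
	}
	rcu_read_unlock();

	/* Drain the percpu arrays, then verify and splice both freelists. */
	for_each_possible_cpu(cpu) {
		struct btree_key_cache_freelist *f =
			per_cpu_ptr(bc->pcpu_freed, cpu);

		while (f->nr)				/* assumed layout */
			list_add(&f->objs[--f->nr]->list, &items);
	}

	BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
	BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);

	list_splice(&bc->freed_pcpu, &items);
	list_splice(&bc->freed_nonpcpu, &items);

	mutex_unlock(&bc->lock);

	list_for_each_entry_safe(ck, n, &items, list) {
		kfree(ck->k);
		kmem_cache_free(bch2_key_cache, ck);	/* assumed cache */
	}

	/* The match at 994 shows a second, truncated condition here. */
	if (atomic_long_read(&bc->nr_dirty))
		pr_warn("btree key cache shutdown with %lu dirty keys",
			atomic_long_read(&bc->nr_dirty));
	if (atomic_long_read(&bc->nr_keys))
		pr_warn("btree key cache shutdown with %lu keys",
			atomic_long_read(&bc->nr_keys));

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);

	free_percpu(bc->pcpu_freed);
}
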
In bch2_fs_btree_key_cache_init() (bc is a function argument):
 1017	int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 1019		struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
 1023		bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
 1024		if (!bc->pcpu_freed)
 1028		if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
 1031		bc->table_init_done = true;
 1036		bc->shrink = shrink;

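Finally, init allocates the percpu freelists, sets up the rhashtable (latching table_init_done so exit knows whether to destroy it), and registers the shrinker. A sketch assuming the shrinker_alloc()/shrinker_register() API that the shrinker_free() match at line 946 suggests; the error codes and the name format string are guesses:

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct shrinker *shrink;

	bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
	if (!bc->pcpu_freed)
		return -ENOMEM;

	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
		return -ENOMEM;

	bc->table_init_done = true;

	shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
	if (!shrink)
		return -ENOMEM;

	bc->shrink = shrink;
	shrink->count_objects	= bch2_btree_key_cache_count;
	shrink->scan_objects	= bch2_btree_key_cache_scan;
	shrink->private_data	= c;
	shrinker_register(shrink);

	return 0;
}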