Lines matching full:ck — every reference to the local variable ck (a struct bkey_cached *) in the bcachefs btree key cache code. Each entry shows the source line number, the matched line, and the enclosing function; the trailing local/argument tags mark the lines where ck is declared.

27 	const struct bkey_cached *ck = obj;  in bch2_btree_key_cache_cmp_fn()  local
30 return ck->key.btree_id != key->btree_id || in bch2_btree_key_cache_cmp_fn()
31 !bpos_eq(ck->key.pos, key->pos); in bch2_btree_key_cache_cmp_fn()
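
The three matches above come from the rhashtable compare callback. A plausible reconstruction of the full function, assuming the standard rhashtable obj_cmpfn signature and that arg->key points at a struct bkey_cached_key:

    static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                           const void *obj)
    {
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;  /* assumed lookup-key type */

        /* nonzero means "no match", per the rhashtable convention */
        return ck->key.btree_id != key->btree_id ||
               !bpos_eq(ck->key.pos, key->pos);
    }
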
54 static bool bkey_cached_lock_for_evict(struct bkey_cached *ck) in bkey_cached_lock_for_evict() argument
56 if (!six_trylock_intent(&ck->c.lock)) in bkey_cached_lock_for_evict()
59 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bkey_cached_lock_for_evict()
60 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
64 if (!six_trylock_write(&ck->c.lock)) { in bkey_cached_lock_for_evict()
65 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
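
Read together, these matches form a three-step non-blocking lock for eviction: trylock intent, bail on dirty, trylock write. A sketch with the return statements (not visible in the matches) filled in:

    static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
    {
        if (!six_trylock_intent(&ck->c.lock))
            return false;

        /* dirty keys are pinned by the journal and can't be evicted */
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
            six_unlock_intent(&ck->c.lock);
            return false;
        }

        if (!six_trylock_write(&ck->c.lock)) {
            six_unlock_intent(&ck->c.lock);
            return false;
        }

        return true;
    }
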
73 struct bkey_cached *ck) in bkey_cached_evict() argument
75 BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash, in bkey_cached_evict()
77 memset(&ck->key, ~0, sizeof(ck->key)); in bkey_cached_evict()
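
Eviction unhashes the entry and poisons its key so a stale lookup can never match it again. Sketch; the rhashtable params symbol and the nr_keys accounting are assumptions, not shown in the matches:

    static void bkey_cached_evict(struct btree_key_cache *c,
                                  struct bkey_cached *ck)
    {
        BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
                                      bch2_btree_key_cache_params)); /* assumed params */
        memset(&ck->key, ~0, sizeof(ck->key)); /* all-ones key: matches nothing */

        atomic_long_dec(&c->nr_keys);          /* assumed counter */
    }
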
83 struct bkey_cached *ck) in bkey_cached_free() argument
87 BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags)); in bkey_cached_free()
89 ck->btree_trans_barrier_seq = in bkey_cached_free()
92 if (ck->c.lock.readers) { in bkey_cached_free()
93 list_move_tail(&ck->list, &bc->freed_pcpu); in bkey_cached_free()
96 list_move_tail(&ck->list, &bc->freed_nonpcpu); in bkey_cached_free()
101 kfree(ck->k); in bkey_cached_free()
102 ck->k = NULL; in bkey_cached_free()
103 ck->u64s = 0; in bkey_cached_free()
105 six_unlock_write(&ck->c.lock); in bkey_cached_free()
106 six_unlock_intent(&ck->c.lock); in bkey_cached_free()
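
The slow free path: stamp the entry with an SRCU barrier cookie, park it on freed_pcpu or freed_nonpcpu depending on whether its six lock has per-cpu readers, free the key buffer, and drop both locks. A sketch; the container_of step and the SRCU instance name are assumptions, and the freelist counters are elided:

    static void bkey_cached_free(struct btree_key_cache *bc,
                                 struct bkey_cached *ck)
    {
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache); /* assumed */

        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        /* entry may still be visible to RCU walkers; record when kfree becomes safe */
        ck->btree_trans_barrier_seq =
            start_poll_synchronize_srcu(&c->btree_trans_barrier); /* assumed srcu_struct */

        if (ck->c.lock.readers)
            list_move_tail(&ck->list, &bc->freed_pcpu);
        else
            list_move_tail(&ck->list, &bc->freed_nonpcpu);

        kfree(ck->k);
        ck->k    = NULL;
        ck->u64s = 0;

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
    }
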
111 struct bkey_cached *ck) in __bkey_cached_move_to_freelist_ordered() argument
118 if (ULONG_CMP_GE(ck->btree_trans_barrier_seq, in __bkey_cached_move_to_freelist_ordered()
120 list_move(&ck->list, &pos->list); in __bkey_cached_move_to_freelist_ordered()
125 list_move(&ck->list, &bc->freed_nonpcpu); in __bkey_cached_move_to_freelist_ordered()
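
This helper keeps freed_nonpcpu ordered by barrier sequence so the shrinker can stop scanning at the first entry whose SRCU grace period hasn't elapsed. Sketch, assuming a reverse walk from the newest end of the list:

    static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
                                                       struct bkey_cached *ck)
    {
        struct bkey_cached *pos;

        list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list)
            if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
                             pos->btree_trans_barrier_seq)) {
                list_move(&ck->list, &pos->list); /* insert after pos */
                return;
            }

        list_move(&ck->list, &bc->freed_nonpcpu); /* older than everything: head */
    }
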
130 struct bkey_cached *ck) in bkey_cached_move_to_freelist() argument
132 BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags)); in bkey_cached_move_to_freelist()
134 if (!ck->c.lock.readers) { in bkey_cached_move_to_freelist()
143 f->objs[f->nr++] = ck; in bkey_cached_move_to_freelist()
160 __bkey_cached_move_to_freelist_ordered(bc, ck); in bkey_cached_move_to_freelist()
165 list_move_tail(&ck->list, &bc->freed_nonpcpu); in bkey_cached_move_to_freelist()
171 list_move_tail(&ck->list, &bc->freed_pcpu); in bkey_cached_move_to_freelist()
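
The matches show three destinations for a freed entry: the current CPU's freelist buffer (f->objs), the ordered nonpcpu list when that buffer is full, and freed_pcpu for entries whose lock uses per-cpu readers. A control-flow sketch; the per-cpu freelist type and field name, the batch spill (the real code appears to spill several entries at once), and the locking around the lists are partly assumed:

    static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
                                             struct bkey_cached *ck)
    {
        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        if (!ck->c.lock.readers) {
            struct btree_key_cache_freelist *f;    /* assumed per-cpu buffer type */

            preempt_disable();
            f = this_cpu_ptr(bc->pcpu_freed);      /* assumed field name */
            if (f->nr < ARRAY_SIZE(f->objs)) {
                f->objs[f->nr++] = ck;             /* fast path: no lock taken */
                preempt_enable();
            } else {
                preempt_enable();
                mutex_lock(&bc->lock);             /* assumed list lock */
                __bkey_cached_move_to_freelist_ordered(bc, ck);
                mutex_unlock(&bc->lock);
            }
        } else {
            mutex_lock(&bc->lock);
            list_move_tail(&ck->list, &bc->freed_pcpu);
            mutex_unlock(&bc->lock);
        }
    }
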
177 struct bkey_cached *ck) in bkey_cached_free_fast() argument
181 ck->btree_trans_barrier_seq = in bkey_cached_free_fast()
184 list_del_init(&ck->list); in bkey_cached_free_fast()
187 kfree(ck->k); in bkey_cached_free_fast()
188 ck->k = NULL; in bkey_cached_free_fast()
189 ck->u64s = 0; in bkey_cached_free_fast()
191 bkey_cached_move_to_freelist(bc, ck); in bkey_cached_free_fast()
193 six_unlock_write(&ck->c.lock); in bkey_cached_free_fast()
194 six_unlock_intent(&ck->c.lock); in bkey_cached_free_fast()
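
The fast free path mirrors bkey_cached_free() but routes the entry through the freelist logic above. Sketch; the container_of step, the SRCU instance, and any counters are assumptions:

    static void bkey_cached_free_fast(struct btree_key_cache *bc,
                                      struct bkey_cached *ck)
    {
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache); /* assumed */

        ck->btree_trans_barrier_seq =
            start_poll_synchronize_srcu(&c->btree_trans_barrier); /* assumed */

        list_del_init(&ck->list);

        kfree(ck->k);
        ck->k    = NULL;
        ck->u64s = 0;

        bkey_cached_move_to_freelist(bc, ck);

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
    }
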
203 struct bkey_cached *ck = NULL; in bkey_cached_alloc() local
214 ck = f->objs[--f->nr]; in bkey_cached_alloc()
217 if (!ck) { in bkey_cached_alloc()
224 ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list); in bkey_cached_alloc()
225 list_del_init(&ck->list); in bkey_cached_alloc()
227 f->objs[f->nr++] = ck; in bkey_cached_alloc()
230 ck = f->nr ? f->objs[--f->nr] : NULL; in bkey_cached_alloc()
237 ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list); in bkey_cached_alloc()
238 list_del_init(&ck->list); in bkey_cached_alloc()
246 ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list); in bkey_cached_alloc()
247 list_del_init(&ck->list); in bkey_cached_alloc()
252 if (ck) { in bkey_cached_alloc()
253 ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_); in bkey_cached_alloc()
255 bkey_cached_move_to_freelist(bc, ck); in bkey_cached_alloc()
259 path->l[0].b = (void *) ck; in bkey_cached_alloc()
260 path->l[0].lock_seq = six_lock_seq(&ck->c.lock); in bkey_cached_alloc()
263 ret = bch2_btree_node_lock_write(trans, path, &ck->c); in bkey_cached_alloc()
266 bkey_cached_move_to_freelist(bc, ck); in bkey_cached_alloc()
270 return ck; in bkey_cached_alloc()
273 ck = allocate_dropping_locks(trans, ret, in bkey_cached_alloc()
276 kmem_cache_free(bch2_key_cache, ck); in bkey_cached_alloc()
280 if (!ck) in bkey_cached_alloc()
283 INIT_LIST_HEAD(&ck->list); in bkey_cached_alloc()
284 bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0); in bkey_cached_alloc()
286 ck->c.cached = true; in bkey_cached_alloc()
287 BUG_ON(!six_trylock_intent(&ck->c.lock)); in bkey_cached_alloc()
288 BUG_ON(!six_trylock_write(&ck->c.lock)); in bkey_cached_alloc()
290 return ck; in bkey_cached_alloc()
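
bkey_cached_alloc() tries, in order: pop from the current CPU's freelist buffer, refill that buffer from freed_nonpcpu, or take from freed_pcpu; a recycled entry is then relocked via btree_node_lock_nopath()/bch2_btree_node_lock_write() and pushed back to the freelist if locking fails. Only when all of that comes up empty does it allocate. A sketch of the allocation tail; allocate_dropping_locks() is a bcachefs helper macro that retries the expression with locks dropped on allocation failure, supplying the gfp flags as _gfp, and kmem_cache_zalloc is my assumption for the allocator call:

    ck = allocate_dropping_locks(trans, ret,
                                 kmem_cache_zalloc(bch2_key_cache, _gfp));
    if (ret) {
        kmem_cache_free(bch2_key_cache, ck);   /* tolerates NULL */
        return ERR_PTR(ret);
    }

    if (!ck)
        return NULL;                           /* caller falls back to bkey_cached_reuse() */

    INIT_LIST_HEAD(&ck->list);
    bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);

    ck->c.cached = true;
    BUG_ON(!six_trylock_intent(&ck->c.lock));  /* fresh lock: trylocks must succeed */
    BUG_ON(!six_trylock_write(&ck->c.lock));

    return ck;
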
298 struct bkey_cached *ck; in bkey_cached_reuse() local
305 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) { in bkey_cached_reuse()
306 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) && in bkey_cached_reuse()
307 bkey_cached_lock_for_evict(ck)) { in bkey_cached_reuse()
308 bkey_cached_evict(c, ck); in bkey_cached_reuse()
312 ck = NULL; in bkey_cached_reuse()
316 return ck; in bkey_cached_reuse()
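
When allocation fails, an existing clean entry is stolen instead: walk the hash table under RCU and evict the first entry that is not dirty and can be trylocked. A sketch; the bucket-table iteration details are inferred from the rht_for_each_entry_rcu() match:

    static struct bkey_cached *bkey_cached_reuse(struct btree_key_cache *c)
    {
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct bkey_cached *ck;
        unsigned i;

        rcu_read_lock();
        tbl = rht_dereference_rcu(c->table.tbl, &c->table);
        for (i = 0; i < tbl->size; i++)
            rht_for_each_entry_rcu(ck, pos, tbl, i, hash)
                if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
                    bkey_cached_lock_for_evict(ck)) {
                    bkey_cached_evict(c, ck);
                    goto out;
                }
        ck = NULL;
    out:
        rcu_read_unlock();
        return ck;
    }
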
324 struct bkey_cached *ck; in btree_key_cache_create() local
327 ck = bkey_cached_alloc(trans, path, &was_new); in btree_key_cache_create()
328 if (IS_ERR(ck)) in btree_key_cache_create()
329 return ck; in btree_key_cache_create()
331 if (unlikely(!ck)) { in btree_key_cache_create()
332 ck = bkey_cached_reuse(bc); in btree_key_cache_create()
333 if (unlikely(!ck)) { in btree_key_cache_create()
342 ck->c.level = 0; in btree_key_cache_create()
343 ck->c.btree_id = path->btree_id; in btree_key_cache_create()
344 ck->key.btree_id = path->btree_id; in btree_key_cache_create()
345 ck->key.pos = path->pos; in btree_key_cache_create()
346 ck->valid = false; in btree_key_cache_create()
347 ck->flags = 1U << BKEY_CACHED_ACCESSED; in btree_key_cache_create()
350 &ck->hash, in btree_key_cache_create()
355 six_unlock_write(&ck->c.lock); in btree_key_cache_create()
356 six_unlock_intent(&ck->c.lock); in btree_key_cache_create()
357 kfree(ck); in btree_key_cache_create()
359 bkey_cached_free_fast(bc, ck); in btree_key_cache_create()
368 six_unlock_write(&ck->c.lock); in btree_key_cache_create()
370 return ck; in btree_key_cache_create()
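
After obtaining an entry, btree_key_cache_create() initializes it for this path and races to insert it into the hash table; losing the race means another thread created the same cached key first, so ours is discarded (kfree for a brand-new entry, bkey_cached_free_fast for a recycled one) and NULL is returned so the caller retries the lookup. A sketch of that portion; the params symbol, the nr_keys counter, and the mark_btree_node_locked() bookkeeping are assumptions:

    ck->c.level      = 0;
    ck->c.btree_id   = path->btree_id;
    ck->key.btree_id = path->btree_id;
    ck->key.pos      = path->pos;
    ck->valid        = false;
    ck->flags        = 1U << BKEY_CACHED_ACCESSED;

    if (unlikely(rhashtable_lookup_insert_fast(&bc->table, &ck->hash,
                                               bch2_btree_key_cache_params))) {
        /* lost the insert race */
        if (likely(was_new)) {
            six_unlock_write(&ck->c.lock);
            six_unlock_intent(&ck->c.lock);
            kfree(ck);
        } else {
            bkey_cached_free_fast(bc, ck);
        }

        mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED); /* assumed */
        return NULL;
    }

    atomic_long_inc(&bc->nr_keys);   /* assumed counter */

    six_unlock_write(&ck->c.lock);   /* keep the intent lock for the caller */

    return ck;
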
375 struct bkey_cached *ck) in btree_key_cache_fill() argument
383 k = bch2_bkey_get_iter(trans, &iter, ck->key.btree_id, ck->key.pos, in btree_key_cache_fill()
409 if (new_u64s > ck->u64s) { in btree_key_cache_fill()
418 bch2_btree_id_str(ck->key.btree_id), new_u64s); in btree_key_cache_fill()
445 kfree(ck->k); in btree_key_cache_fill()
446 ck->u64s = new_u64s; in btree_key_cache_fill()
447 ck->k = new_k; in btree_key_cache_fill()
450 bkey_reassemble(ck->k, k); in btree_key_cache_fill()
451 ck->valid = true; in btree_key_cache_fill()
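
Filling copies the key from the underlying btree into the cached entry, reallocating the buffer when the new key needs more than ck->u64s u64s; the match at line 418 shows the allocation-failure message naming the btree via bch2_btree_id_str(). A sketch of the resize-and-copy core, with the rounding policy, gfp flags, and error code simplified:

    unsigned new_u64s = k.k->u64s;
    struct bkey_i *new_k = NULL;

    if (new_u64s > ck->u64s) {
        new_u64s = roundup_pow_of_two(new_u64s);   /* assumed rounding policy */
        new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
        if (!new_k)
            return -ENOMEM;  /* simplified: the real code retries with locks dropped
                                and uses a bcachefs-specific error */
    }

    if (new_k) {
        kfree(ck->k);
        ck->u64s = new_u64s;
        ck->k    = new_k;
    }

    bkey_reassemble(ck->k, k);   /* unpack/copy the btree key into ck->k */
    ck->valid = true;
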
466 struct bkey_cached *ck; in bch2_btree_path_traverse_cached_slowpath() local
474 ck = (void *) path->l[0].b; in bch2_btree_path_traverse_cached_slowpath()
478 ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); in bch2_btree_path_traverse_cached_slowpath()
479 if (!ck) { in bch2_btree_path_traverse_cached_slowpath()
480 ck = btree_key_cache_create(trans, path); in bch2_btree_path_traverse_cached_slowpath()
481 ret = PTR_ERR_OR_ZERO(ck); in bch2_btree_path_traverse_cached_slowpath()
484 if (!ck) in bch2_btree_path_traverse_cached_slowpath()
492 ret = btree_node_lock(trans, path, (void *) ck, 0, in bch2_btree_path_traverse_cached_slowpath()
499 if (ck->key.btree_id != path->btree_id || in bch2_btree_path_traverse_cached_slowpath()
500 !bpos_eq(ck->key.pos, path->pos)) { in bch2_btree_path_traverse_cached_slowpath()
501 six_unlock_type(&ck->c.lock, lock_want); in bch2_btree_path_traverse_cached_slowpath()
509 path->l[0].lock_seq = six_lock_seq(&ck->c.lock); in bch2_btree_path_traverse_cached_slowpath()
510 path->l[0].b = (void *) ck; in bch2_btree_path_traverse_cached_slowpath()
514 if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) { in bch2_btree_path_traverse_cached_slowpath()
526 ret = btree_key_cache_fill(trans, path, ck); in bch2_btree_path_traverse_cached_slowpath()
537 if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) in bch2_btree_path_traverse_cached_slowpath()
538 set_bit(BKEY_CACHED_ACCESSED, &ck->flags); in bch2_btree_path_traverse_cached_slowpath()
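
The slowpath ties the pieces together: find the entry or create it, lock it at the wanted degree, then re-check ck->key against the path, because between the RCU lookup and taking the lock the entry can be evicted and recycled for a different key. A sketch of that lookup/lock/validate loop; __btree_lock_want() and the mark_btree_node_locked() calls are assumed helpers:

    retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck) {
            ck = btree_key_cache_create(trans, path);
            ret = PTR_ERR_OR_ZERO(ck);
            if (ret)
                goto err;
            if (!ck)
                goto retry;        /* lost the insert race: look up again */

            mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED); /* assumed */
            path->locks_want = 1;
        } else {
            enum six_lock_type lock_want = __btree_lock_want(path, 0);  /* assumed */

            ret = btree_node_lock(trans, path, (void *) ck, 0,
                                  lock_want, _THIS_IP_);
            if (ret)
                goto err;

            if (ck->key.btree_id != path->btree_id ||
                !bpos_eq(ck->key.pos, path->pos)) {
                /* evicted and reused while we waited for the lock */
                six_unlock_type(&ck->c.lock, lock_want);
                goto retry;
            }
        }

        path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
        path->l[0].b        = (void *) ck;
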
557 struct bkey_cached *ck; in bch2_btree_path_traverse_cached() local
565 ck = (void *) path->l[0].b; in bch2_btree_path_traverse_cached()
569 ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); in bch2_btree_path_traverse_cached()
570 if (!ck) { in bch2_btree_path_traverse_cached()
575 ret = btree_node_lock(trans, path, (void *) ck, 0, in bch2_btree_path_traverse_cached()
582 if (ck->key.btree_id != path->btree_id || in bch2_btree_path_traverse_cached()
583 !bpos_eq(ck->key.pos, path->pos)) { in bch2_btree_path_traverse_cached()
584 six_unlock_type(&ck->c.lock, lock_want); in bch2_btree_path_traverse_cached()
592 path->l[0].lock_seq = six_lock_seq(&ck->c.lock); in bch2_btree_path_traverse_cached()
593 path->l[0].b = (void *) ck; in bch2_btree_path_traverse_cached()
595 if (!ck->valid) in bch2_btree_path_traverse_cached()
598 if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) in bch2_btree_path_traverse_cached()
599 set_bit(BKEY_CACHED_ACCESSED, &ck->flags); in bch2_btree_path_traverse_cached()
602 EBUG_ON(!ck->valid); in bch2_btree_path_traverse_cached()
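
The fast path repeats the same lock-and-revalidate dance but, per the matches at lines 595 and 602, simply punts when the entry is missing or not yet valid (presumably deferring to the slowpath, which can fill it). One detail worth pulling out is the accessed-bit idiom both traverse functions share; a sketch:

    /* test first: avoids dirtying the cacheline when the bit is already set */
    if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
        set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

The bit feeds the shrinker's second-chance policy further down: an entry is only evicted after surviving one scan without being accessed.
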
617 struct bkey_cached *ck = NULL; in btree_key_cache_flush_pos() local
633 ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b; in btree_key_cache_flush_pos()
634 if (!ck) in btree_key_cache_flush_pos()
637 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
643 BUG_ON(!ck->valid); in btree_key_cache_flush_pos()
645 if (journal_seq && ck->journal.seq != journal_seq) in btree_key_cache_flush_pos()
648 trans->journal_res.seq = ck->journal.seq; in btree_key_cache_flush_pos()
656 if (ck->journal.seq == journal_last_seq(j)) in btree_key_cache_flush_pos()
659 if (ck->journal.seq != journal_last_seq(j) || in btree_key_cache_flush_pos()
664 bch2_trans_update(trans, &b_iter, ck->k, in btree_key_cache_flush_pos()
681 bch2_journal_pin_drop(j, &ck->journal); in btree_key_cache_flush_pos()
687 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
688 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
699 bch2_btree_node_lock_write_nofail(trans, path, &ck->c); in btree_key_cache_flush_pos()
701 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
702 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
707 bkey_cached_evict(&c->btree_key_cache, ck); in btree_key_cache_flush_pos()
708 bkey_cached_free_fast(&c->btree_key_cache, ck); in btree_key_cache_flush_pos()
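
The flush commits the cached key back to the underlying btree (the bch2_trans_update() on ck->k at line 664), then drops the journal pin and clears the dirty bit; when called for eviction it additionally write-locks the node nofail, evicts, and frees it. A sketch of the post-commit tail; the nr_dirty accounting and the evict flag plumbing are assumptions:

    bch2_journal_pin_drop(j, &ck->journal);

    if (!evict) {
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
            clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
            atomic_long_dec(&c->btree_key_cache.nr_dirty);  /* assumed counter */
        }
    } else {
        bch2_btree_node_lock_write_nofail(trans, path, &ck->c);

        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
            clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
            atomic_long_dec(&c->btree_key_cache.nr_dirty);
        }

        bkey_cached_evict(&c->btree_key_cache, ck);
        bkey_cached_free_fast(&c->btree_key_cache, ck);
    }
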
720 struct bkey_cached *ck = in bch2_btree_key_cache_journal_flush() local
727 btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read); in bch2_btree_key_cache_journal_flush()
728 key = ck->key; in bch2_btree_key_cache_journal_flush()
730 if (ck->journal.seq != seq || in bch2_btree_key_cache_journal_flush()
731 !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_journal_flush()
732 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
736 if (ck->seq != seq) { in bch2_btree_key_cache_journal_flush()
737 bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal, in bch2_btree_key_cache_journal_flush()
739 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
742 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
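
The journal-reclaim callback takes a read lock, snapshots ck->key, and bails if the entry was already flushed or was redirtied at a newer sequence, in which case the pin is moved forward rather than flushing now. A sketch of that guard logic; what follows the final unlock (running btree_key_cache_flush_pos() inside a transaction) is only summarized here:

    btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
    key = ck->key;                 /* snapshot: ck may be evicted once unlocked */

    if (ck->journal.seq != seq ||
        !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
        six_unlock_read(&ck->c.lock);
        goto unlock;               /* nothing pinned at this seq anymore */
    }

    if (ck->seq != seq) {
        /* redirtied: re-pin at the newer seq instead of flushing now */
        bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
                                bch2_btree_key_cache_journal_flush);
        six_unlock_read(&ck->c.lock);
        goto unlock;
    }

    six_unlock_read(&ck->c.lock);
    /* ...flush via btree_key_cache_flush_pos(trans, key, seq, ...)... */
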
759 struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b; in bch2_btree_insert_key_cached() local
763 BUG_ON(insert->k.u64s > ck->u64s); in bch2_btree_insert_key_cached()
765 bkey_copy(ck->k, insert); in bch2_btree_insert_key_cached()
766 ck->valid = true; in bch2_btree_insert_key_cached()
768 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_insert_key_cached()
770 set_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_insert_key_cached()
790 !journal_pin_active(&ck->journal)) { in bch2_btree_insert_key_cached()
791 ck->seq = trans->journal_res.seq; in bch2_btree_insert_key_cached()
794 &ck->journal, bch2_btree_key_cache_journal_flush); in bch2_btree_insert_key_cached()
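
An insert through the key cache never touches the btree directly: it copies the key into ck->k, marks the entry dirty, and pins the journal at this transaction's sequence with the flush callback so reclaim will eventually write it back. Sketch; the dirty accounting is an assumption:

    BUG_ON(insert->k.u64s > ck->u64s);   /* buffer was sized during fill */

    bkey_copy(ck->k, insert);
    ck->valid = true;

    if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
        set_bit(BKEY_CACHED_DIRTY, &ck->flags);
        atomic_long_inc(&c->btree_key_cache.nr_dirty);  /* assumed counter */
    }

    if (!journal_pin_active(&ck->journal)) {
        ck->seq = trans->journal_res.seq;
        bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
                             &ck->journal, bch2_btree_key_cache_journal_flush);
    }
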
805 struct bkey_cached *ck = (void *) path->l[0].b; in bch2_btree_key_cache_drop() local
807 BUG_ON(!ck->valid); in bch2_btree_key_cache_drop()
813 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_drop()
814 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_key_cache_drop()
816 bch2_journal_pin_drop(&c->journal, &ck->journal); in bch2_btree_key_cache_drop()
819 ck->valid = false; in bch2_btree_key_cache_drop()
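
Dropping invalidates the cached entry without flushing it; a dirty entry also releases its journal pin so reclaim stops waiting on it. Sketch, counter assumed:

    BUG_ON(!ck->valid);

    if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
        atomic_long_dec(&c->btree_key_cache.nr_dirty);   /* assumed counter */
        bch2_journal_pin_drop(&c->journal, &ck->journal);
    }

    ck->valid = false;
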
828 struct bkey_cached *ck, *t; in bch2_btree_key_cache_scan() local
843 list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) { in bch2_btree_key_cache_scan()
845 ck->btree_trans_barrier_seq)) in bch2_btree_key_cache_scan()
848 list_del(&ck->list); in bch2_btree_key_cache_scan()
849 six_lock_exit(&ck->c.lock); in bch2_btree_key_cache_scan()
850 kmem_cache_free(bch2_key_cache, ck); in bch2_btree_key_cache_scan()
861 list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) { in bch2_btree_key_cache_scan()
863 ck->btree_trans_barrier_seq)) in bch2_btree_key_cache_scan()
866 list_del(&ck->list); in bch2_btree_key_cache_scan()
867 six_lock_exit(&ck->c.lock); in bch2_btree_key_cache_scan()
868 kmem_cache_free(bch2_key_cache, ck); in bch2_btree_key_cache_scan()
890 ck = container_of(pos, struct bkey_cached, hash); in bch2_btree_key_cache_scan()
892 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) in bch2_btree_key_cache_scan()
895 if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) in bch2_btree_key_cache_scan()
896 clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); in bch2_btree_key_cache_scan()
897 else if (bkey_cached_lock_for_evict(ck)) { in bch2_btree_key_cache_scan()
898 bkey_cached_evict(bc, ck); in bch2_btree_key_cache_scan()
899 bkey_cached_free(bc, ck); in bch2_btree_key_cache_scan()
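
The shrinker has two phases visible in the matches. First it reaps freed entries whose SRCU grace period has expired; because freed_nonpcpu is kept ordered (see __bkey_cached_move_to_freelist_ordered above), it can stop at the first still-pending entry. Then it walks the hash table with a second-chance policy: dirty entries are skipped, a set ACCESSED bit is cleared as the entry's one reprieve, and anything else that can be trylocked is evicted. Sketches of both steps; the srcu_struct name is assumed:

    /* phase 1: free entries whose RCU readers are provably gone */
    list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
        if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                         ck->btree_trans_barrier_seq))
            break;   /* list is ordered: the rest are younger */

        list_del(&ck->list);
        six_lock_exit(&ck->c.lock);
        kmem_cache_free(bch2_key_cache, ck);
    }

    /* phase 2, per hash-table entry: second-chance eviction */
    if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
        goto next;                                   /* never reclaim dirty keys */

    if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
        clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); /* second chance: survive this scan */
    else if (bkey_cached_lock_for_evict(ck)) {
        bkey_cached_evict(bc, ck);
        bkey_cached_free(bc, ck);
    }
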
938 struct bkey_cached *ck, *n; in bch2_fs_btree_key_cache_exit() local
958 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) { in bch2_fs_btree_key_cache_exit()
959 bkey_cached_evict(bc, ck); in bch2_fs_btree_key_cache_exit()
960 list_add(&ck->list, &items); in bch2_fs_btree_key_cache_exit()
971 ck = f->objs[i]; in bch2_fs_btree_key_cache_exit()
972 list_add(&ck->list, &items); in bch2_fs_btree_key_cache_exit()
985 list_for_each_entry_safe(ck, n, &items, list) { in bch2_fs_btree_key_cache_exit()
988 list_del(&ck->list); in bch2_fs_btree_key_cache_exit()
989 kfree(ck->k); in bch2_fs_btree_key_cache_exit()
990 six_lock_exit(&ck->c.lock); in bch2_fs_btree_key_cache_exit()
991 kmem_cache_free(bch2_key_cache, ck); in bch2_fs_btree_key_cache_exit()
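
Teardown gathers everything onto a local list: every hash-table entry is evicted, the per-cpu freelist buffers (f->objs) and the freed lists are appended, and each entry is finally destroyed. Sketch of the final loop; the cond_resched() is an assumption:

    list_for_each_entry_safe(ck, n, &items, list) {
        cond_resched();           /* assumed: the list can be long */

        list_del(&ck->list);
        kfree(ck->k);
        six_lock_exit(&ck->c.lock);
        kmem_cache_free(bch2_key_cache, ck);
    }
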