Lines matching full:ck (occurrences of ck, the struct bkey_cached pointer, in the bcachefs btree key cache code)
27 const struct bkey_cached *ck = obj; in bch2_btree_key_cache_cmp_fn() local
30 return ck->key.btree_id != key->btree_id || in bch2_btree_key_cache_cmp_fn()
31 !bpos_eq(ck->key.pos, key->pos); in bch2_btree_key_cache_cmp_fn()
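These two lines are the body of the rhashtable compare callback; a minimal reconstruction, assuming the standard obj_cmpfn signature and that arg->key points at a struct bkey_cached_key:

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
{
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;   /* key type assumed */

        /* rhashtable expects nonzero for "does not match" */
        return ck->key.btree_id != key->btree_id ||
                !bpos_eq(ck->key.pos, key->pos);
}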
43 struct bkey_cached *ck, in btree_path_cached_set() argument
46 path->l[0].lock_seq = six_lock_seq(&ck->c.lock); in btree_path_cached_set()
47 path->l[0].b = (void *) ck; in btree_path_cached_set()
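btree_path_cached_set() points level 0 of a cached btree path at the bkey_cached entry and records the lock sequence; a sketch from the fragment, with the lock-state bookkeeping that presumably follows left as an assumption:

static void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
                                  struct bkey_cached *ck,
                                  enum btree_node_locked_type lock_held)
{
        path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
        path->l[0].b        = (void *) ck;      /* the level-0 "node" is the cached entry */
        /* assumed: trans/lock_held are then used to mark level 0 as locked,
         * e.g. mark_btree_node_locked(trans, path, 0, lock_held) */
}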
64 static bool bkey_cached_lock_for_evict(struct bkey_cached *ck) in bkey_cached_lock_for_evict() argument
66 if (!six_trylock_intent(&ck->c.lock)) in bkey_cached_lock_for_evict()
69 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bkey_cached_lock_for_evict()
70 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
74 if (!six_trylock_write(&ck->c.lock)) { in bkey_cached_lock_for_evict()
75 six_unlock_intent(&ck->c.lock); in bkey_cached_lock_for_evict()
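The evict trylock helper is nearly complete in the listing; only the return statements are filled in below:

static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
        if (!six_trylock_intent(&ck->c.lock))
                return false;

        /* never evict a dirty entry: it still has to be flushed */
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        if (!six_trylock_write(&ck->c.lock)) {
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        return true;
}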
83 struct bkey_cached *ck) in bkey_cached_evict() argument
85 bool ret = !rhashtable_remove_fast(&c->table, &ck->hash, in bkey_cached_evict()
88 memset(&ck->key, ~0, sizeof(ck->key)); in bkey_cached_evict()
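bkey_cached_evict() unhashes the entry and poisons its key so stale lookups cannot match it; a sketch assuming the return value is simply whether the rhashtable removal succeeded (key-count accounting omitted):

static bool bkey_cached_evict(struct btree_key_cache *c,
                              struct bkey_cached *ck)
{
        bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
                                           bch2_btree_key_cache_params);
        if (ret)
                memset(&ck->key, ~0, sizeof(ck->key));  /* poison the key */

        return ret;
}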
98 struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu); in __bkey_cached_free() local
101 kmem_cache_free(bch2_key_cache, ck); in __bkey_cached_free()
105 struct bkey_cached *ck) in bkey_cached_free() argument
107 kfree(ck->k); in bkey_cached_free()
108 ck->k = NULL; in bkey_cached_free()
109 ck->u64s = 0; in bkey_cached_free()
111 six_unlock_write(&ck->c.lock); in bkey_cached_free()
112 six_unlock_intent(&ck->c.lock); in bkey_cached_free()
114 bool pcpu_readers = ck->c.lock.readers != NULL; in bkey_cached_free()
115 rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu); in bkey_cached_free()
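Freeing is two-staged: bkey_cached_free() strips the key buffer, drops both lock levels and queues the entry on an rcu_pending list, and __bkey_cached_free() is the deferred callback that finally returns it to the slab once no RCU readers can still see it. A sketch assembled from the fragments; the callback signature and any per-CPU accounting are assumptions:

static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
        /* signature assumed from the rcu_pending callback convention */
        struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);

        kmem_cache_free(bch2_key_cache, ck);
}

static void bkey_cached_free(struct btree_key_cache *bc,
                             struct bkey_cached *ck)
{
        kfree(ck->k);
        ck->k = NULL;
        ck->u64s = 0;

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);

        /* pick the pending list matching the lock's reader mode, then defer the free */
        bool pcpu_readers = ck->c.lock.readers != NULL;
        rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
}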
123 struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp); in __bkey_cached_alloc() local
124 if (unlikely(!ck)) in __bkey_cached_alloc()
126 ck->k = kmalloc(key_u64s * sizeof(u64), gfp); in __bkey_cached_alloc()
127 if (unlikely(!ck->k)) { in __bkey_cached_alloc()
128 kmem_cache_free(bch2_key_cache, ck); in __bkey_cached_alloc()
131 ck->u64s = key_u64s; in __bkey_cached_alloc()
132 return ck; in __bkey_cached_alloc()
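Fresh allocation is almost fully visible; only the signature and the NULL returns on failure below are assumptions:

static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
        struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
        if (unlikely(!ck))
                return NULL;

        /* the key buffer is allocated separately so it can be resized later */
        ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
        if (unlikely(!ck->k)) {
                kmem_cache_free(bch2_key_cache, ck);
                return NULL;
        }
        ck->u64s = key_u64s;
        return ck;
}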
143 struct bkey_cached *ck = container_of_or_null( in bkey_cached_alloc() local
146 if (ck) in bkey_cached_alloc()
149 ck = allocate_dropping_locks(trans, ret, in bkey_cached_alloc()
152 if (ck) in bkey_cached_alloc()
153 kfree(ck->k); in bkey_cached_alloc()
154 kmem_cache_free(bch2_key_cache, ck); in bkey_cached_alloc()
158 if (ck) { in bkey_cached_alloc()
159 bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); in bkey_cached_alloc()
160 ck->c.cached = true; in bkey_cached_alloc()
164 ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]), in bkey_cached_alloc()
166 if (ck) in bkey_cached_alloc()
169 six_lock_intent(&ck->c.lock, NULL, NULL); in bkey_cached_alloc()
170 six_lock_write(&ck->c.lock, NULL, NULL); in bkey_cached_alloc()
171 return ck; in bkey_cached_alloc()
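bkey_cached_alloc() tries, in order, to recycle an entry from the rcu_pending list, to allocate a fresh one (dropping btree locks if the allocation might block), and finally to steal from the other CPUs' pending lists; whichever entry it gets is returned intent- and write-locked. A sketch of that flow; the btree_uses_pcpu_readers() helper, the first dequeue call, and the exact goto structure are assumptions:

static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned key_u64s)
{
        struct btree_key_cache *bc = &trans->c->btree_key_cache;
        bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);    /* assumed helper */
        int ret;

        /* fast path: recycle an entry waiting on the RCU pending list */
        struct bkey_cached *ck = container_of_or_null(
                        rcu_pending_dequeue(&bc->pending[pcpu_readers]),
                        struct bkey_cached, rcu);
        if (ck)
                goto lock;

        /* slow path: fresh allocation, dropping btree locks if it has to block */
        ck = allocate_dropping_locks(trans, ret,
                                     __bkey_cached_alloc(key_u64s, _gfp));
        if (ret) {
                if (ck)
                        kfree(ck->k);
                kmem_cache_free(bch2_key_cache, ck);
                return ERR_PTR(ret);
        }

        if (ck) {
                bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
                ck->c.cached = true;
                goto lock;
        }

        /* last resort: steal from any CPU's pending list */
        ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
                                  struct bkey_cached, rcu);
        if (!ck)
                return NULL;
lock:
        six_lock_intent(&ck->c.lock, NULL, NULL);
        six_lock_write(&ck->c.lock, NULL, NULL);
        return ck;
}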
179 struct bkey_cached *ck; in bkey_cached_reuse() local
185 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) { in bkey_cached_reuse()
186 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) && in bkey_cached_reuse()
187 bkey_cached_lock_for_evict(ck)) { in bkey_cached_reuse()
188 if (bkey_cached_evict(c, ck)) in bkey_cached_reuse()
190 six_unlock_write(&ck->c.lock); in bkey_cached_reuse()
191 six_unlock_intent(&ck->c.lock); in bkey_cached_reuse()
194 ck = NULL; in bkey_cached_reuse()
197 return ck; in bkey_cached_reuse()
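When allocation fails outright, bkey_cached_reuse() walks the hash table under RCU looking for a clean entry it can steal; a sketch, with the rcu_read_lock()/table-dereference boilerplate assumed around the visible loop body:

static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct bkey_cached *ck;
        unsigned i;

        rcu_read_lock();
        tbl = rht_dereference_rcu(c->table.tbl, &c->table);
        for (i = 0; i < tbl->size; i++)
                rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
                        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
                            bkey_cached_lock_for_evict(ck)) {
                                if (bkey_cached_evict(c, ck))
                                        goto out;
                                six_unlock_write(&ck->c.lock);
                                six_unlock_intent(&ck->c.lock);
                        }
                }
        ck = NULL;
out:
        rcu_read_unlock();
        return ck;
}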
222 struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s); in btree_key_cache_create() local
223 int ret = PTR_ERR_OR_ZERO(ck); in btree_key_cache_create()
227 if (unlikely(!ck)) { in btree_key_cache_create()
228 ck = bkey_cached_reuse(bc); in btree_key_cache_create()
229 if (unlikely(!ck)) { in btree_key_cache_create()
236 ck->c.level = 0; in btree_key_cache_create()
237 ck->c.btree_id = ck_path->btree_id; in btree_key_cache_create()
238 ck->key.btree_id = ck_path->btree_id; in btree_key_cache_create()
239 ck->key.pos = ck_path->pos; in btree_key_cache_create()
240 ck->flags = 1U << BKEY_CACHED_ACCESSED; in btree_key_cache_create()
242 if (unlikely(key_u64s > ck->u64s)) { in btree_key_cache_create()
249 bch2_btree_id_str(ck->key.btree_id), key_u64s); in btree_key_cache_create()
256 kfree(ck->k); in btree_key_cache_create()
257 ck->k = new_k; in btree_key_cache_create()
258 ck->u64s = key_u64s; in btree_key_cache_create()
261 bkey_reassemble(ck->k, k); in btree_key_cache_create()
267 ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params); in btree_key_cache_create()
275 six_unlock_write(&ck->c.lock); in btree_key_cache_create()
279 six_lock_downgrade(&ck->c.lock); in btree_key_cache_create()
280 btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want); in btree_key_cache_create()
284 bkey_cached_free(bc, ck); in btree_key_cache_create()
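btree_key_cache_create() builds a new cached entry for a position, fills it from the key read out of the btree, and publishes it in the hash table. A condensed sketch of that flow; error codes, gfp flags, the key-buffer sizing policy, the lock-want helper, and write-locking the underlying btree node around the hash insert are omitted or marked as assumptions:

static int btree_key_cache_create(struct btree_trans *trans,
                                  struct btree_path *ck_path,
                                  struct btree_path *path,
                                  struct bkey_s_c k)
{
        struct btree_key_cache *bc = &trans->c->btree_key_cache;
        unsigned key_u64s = k.k->u64s + 1;      /* sizing assumed */

        struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s);
        int ret = PTR_ERR_OR_ZERO(ck);
        if (ret)
                return ret;

        if (unlikely(!ck)) {
                /* allocation failed: try to steal a clean entry instead */
                ck = bkey_cached_reuse(bc);
                if (unlikely(!ck))
                        return -ENOMEM;         /* error code assumed */
        }

        ck->c.level       = 0;
        ck->c.btree_id    = ck_path->btree_id;
        ck->key.btree_id  = ck_path->btree_id;
        ck->key.pos       = ck_path->pos;
        ck->flags         = 1U << BKEY_CACHED_ACCESSED;

        /* a reused entry may have a key buffer that is too small */
        if (unlikely(key_u64s > ck->u64s)) {
                struct bkey_i *new_k = kmalloc(key_u64s * sizeof(u64), GFP_NOFS); /* gfp assumed */
                if (unlikely(!new_k)) {
                        ret = -ENOMEM;
                        goto err;
                }
                kfree(ck->k);
                ck->k = new_k;
                ck->u64s = key_u64s;
        }

        bkey_reassemble(ck->k, k);

        ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);
        if (unlikely(ret))
                goto err;       /* lost a race with another thread creating the entry */

        six_unlock_write(&ck->c.lock);

        enum six_lock_type lock_want = __btree_lock_want(ck_path, 0);   /* assumed helper */
        if (lock_want == SIX_LOCK_read)
                six_lock_downgrade(&ck->c.lock);
        btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want);
        return 0;
err:
        bkey_cached_free(bc, ck);
        return ret;
}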
353 struct bkey_cached *ck; in btree_path_traverse_cached_fast() local
356 ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); in btree_path_traverse_cached_fast()
357 if (!ck) in btree_path_traverse_cached_fast()
362 int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_); in btree_path_traverse_cached_fast()
366 if (ck->key.btree_id != path->btree_id || in btree_path_traverse_cached_fast()
367 !bpos_eq(ck->key.pos, path->pos)) { in btree_path_traverse_cached_fast()
368 six_unlock_type(&ck->c.lock, lock_want); in btree_path_traverse_cached_fast()
372 if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) in btree_path_traverse_cached_fast()
373 set_bit(BKEY_CACHED_ACCESSED, &ck->flags); in btree_path_traverse_cached_fast()
375 btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want); in btree_path_traverse_cached_fast()
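The fast traversal path looks the position up in the hash table, locks the entry, rechecks that it still caches the same key (it may have been recycled while waiting for the lock), and points the path at it; a reconstruction, with the lock-want helper, the failure return and the final uptodate marking noted as assumptions:

static int btree_path_traverse_cached_fast(struct btree_trans *trans,
                                           struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck)
                return -ENOENT;         /* assumed: caller falls back to the slow path */

        enum six_lock_type lock_want = __btree_lock_want(path, 0);      /* assumed helper */

        int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
        if (ret)
                return ret;

        /* the entry may now cache a different key: drop the lock and look up again */
        if (ck->key.btree_id != path->btree_id ||
            !bpos_eq(ck->key.pos, path->pos)) {
                six_unlock_type(&ck->c.lock, lock_want);
                goto retry;
        }

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);      /* keeps the shrinker away */

        btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
        path->uptodate = BTREE_ITER_UPTODATE;                   /* assumed final step */
        return 0;
}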
418 struct bkey_cached *ck = NULL; in btree_key_cache_flush_pos() local
434 ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b; in btree_key_cache_flush_pos()
435 if (!ck) in btree_key_cache_flush_pos()
438 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
444 if (journal_seq && ck->journal.seq != journal_seq) in btree_key_cache_flush_pos()
447 trans->journal_res.seq = ck->journal.seq; in btree_key_cache_flush_pos()
455 if (ck->journal.seq == journal_last_seq(j)) in btree_key_cache_flush_pos()
458 if (ck->journal.seq != journal_last_seq(j) || in btree_key_cache_flush_pos()
470 ret = bch2_trans_update(trans, &b_iter, ck->k, in btree_key_cache_flush_pos()
487 bch2_journal_pin_drop(j, &ck->journal); in btree_key_cache_flush_pos()
493 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
494 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
505 bch2_btree_node_lock_write_nofail(trans, path, &ck->c); in btree_key_cache_flush_pos()
507 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in btree_key_cache_flush_pos()
508 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in btree_key_cache_flush_pos()
513 if (bkey_cached_evict(&c->btree_key_cache, ck)) { in btree_key_cache_flush_pos()
514 bkey_cached_free(&c->btree_key_cache, ck); in btree_key_cache_flush_pos()
516 six_unlock_write(&ck->c.lock); in btree_key_cache_flush_pos()
517 six_unlock_intent(&ck->c.lock); in btree_key_cache_flush_pos()
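btree_key_cache_flush_pos() commits the cached key back to the underlying btree, drops the journal pin, and then either just clears the dirty bit or evicts the entry outright. The transaction commit and journal-sequence handling are too fragmentary to reconstruct here; below is a hypothetical condensation of the tail only (key_cache_flush_finish is an illustrative name, not a real function, and dirty-count accounting is omitted):

static void key_cache_flush_finish(struct btree_trans *trans, struct btree_path *path,
                                   struct bch_fs *c, struct bkey_cached *ck, bool evict)
{
        bch2_journal_pin_drop(&c->journal, &ck->journal);

        if (!evict) {
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                return;
        }

        /* eviction: take the write lock so nobody else can still be using the entry */
        bch2_btree_node_lock_write_nofail(trans, path, &ck->c);

        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                clear_bit(BKEY_CACHED_DIRTY, &ck->flags);

        if (bkey_cached_evict(&c->btree_key_cache, ck)) {
                bkey_cached_free(&c->btree_key_cache, ck);
        } else {
                six_unlock_write(&ck->c.lock);
                six_unlock_intent(&ck->c.lock);
        }
}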
530 struct bkey_cached *ck = in bch2_btree_key_cache_journal_flush() local
537 btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read); in bch2_btree_key_cache_journal_flush()
538 key = ck->key; in bch2_btree_key_cache_journal_flush()
540 if (ck->journal.seq != seq || in bch2_btree_key_cache_journal_flush()
541 !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_journal_flush()
542 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
546 if (ck->seq != seq) { in bch2_btree_key_cache_journal_flush()
547 bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal, in bch2_btree_key_cache_journal_flush()
549 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
552 six_unlock_read(&ck->c.lock); in bch2_btree_key_cache_journal_flush()
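The journal-reclaim pin callback takes a read lock, snapshots the cached key, and bails out early if the entry was already flushed or was re-dirtied at a newer sequence (in which case it just moves the pin forward); a reconstruction, with the container_of setup, SRCU bookkeeping, and the actual flush call treated as assumptions:

static int bch2_btree_key_cache_journal_flush(struct journal *j,
                                              struct journal_entry_pin *pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);     /* assumed */
        struct bkey_cached *ck =
                container_of(pin, struct bkey_cached, journal);
        struct bkey_cached_key key;
        struct btree_trans *trans = bch2_trans_get(c);
        int ret = 0;

        btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
        key = ck->key;

        if (ck->journal.seq != seq ||
            !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                /* already flushed, or pinned at a different seq: nothing to do */
                six_unlock_read(&ck->c.lock);
                goto out;
        }

        if (ck->seq != seq) {
                /* re-dirtied since: move the pin to the newer seq instead of flushing */
                bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
                                        bch2_btree_key_cache_journal_flush);
                six_unlock_read(&ck->c.lock);
                goto out;
        }
        six_unlock_read(&ck->c.lock);

        /* assumed: flush 'key' back to the btree via btree_key_cache_flush_pos() */
out:
        bch2_trans_put(trans);
        return ret;
}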
569 struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b; in bch2_btree_insert_key_cached() local
573 BUG_ON(insert->k.u64s > ck->u64s); in bch2_btree_insert_key_cached()
575 bkey_copy(ck->k, insert); in bch2_btree_insert_key_cached()
577 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_insert_key_cached()
579 set_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_insert_key_cached()
599 !journal_pin_active(&ck->journal)) { in bch2_btree_insert_key_cached()
600 ck->seq = trans->journal_res.seq; in bch2_btree_insert_key_cached()
603 &ck->journal, bch2_btree_key_cache_journal_flush); in bch2_btree_insert_key_cached()
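On transaction commit, the new key is copied into the cached entry, the entry is marked dirty, and a journal pin is added the first time it becomes dirty so reclaim can flush it later; a sketch, with the commit-flags check, dirty-count accounting, and reclaim kick treated as assumptions:

bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  unsigned flags,
                                  struct btree_insert_entry *insert_entry)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
        struct bkey_i *insert = insert_entry->k;        /* field name assumed */

        BUG_ON(insert->k.u64s > ck->u64s);      /* buffer was sized at create/fill time */

        bkey_copy(ck->k, insert);

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                set_bit(BKEY_CACHED_DIRTY, &ck->flags);

        /* only pin the journal the first time; later updates are tracked via ck->seq */
        if (!journal_pin_active(&ck->journal)) {        /* commit-flags check omitted */
                ck->seq = trans->journal_res.seq;
                bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
                                     &ck->journal, bch2_btree_key_cache_journal_flush);
        }

        return true;
}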
615 struct bkey_cached *ck = (void *) path->l[0].b; in bch2_btree_key_cache_drop() local
621 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_drop()
622 clear_bit(BKEY_CACHED_DIRTY, &ck->flags); in bch2_btree_key_cache_drop()
624 bch2_journal_pin_drop(&c->journal, &ck->journal); in bch2_btree_key_cache_drop()
627 bkey_cached_evict(bc, ck); in bch2_btree_key_cache_drop()
628 bkey_cached_free(bc, ck); in bch2_btree_key_cache_drop()
635 if (path2->l[0].b == (void *) ck) { in bch2_btree_key_cache_drop()
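Dropping a cached key (after the btree was updated directly, bypassing the cache) clears the dirty state, drops the journal pin, evicts and frees the entry, and then invalidates any other paths in the transaction that still point at it; a sketch, with the lock-state bookkeeping and path-invalidation details left as assumptions:

void bch2_btree_key_cache_drop(struct btree_trans *trans,
                               struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck = (void *) path->l[0].b;

        /* the cached copy is now stale and must go, even if dirty */
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                bch2_journal_pin_drop(&c->journal, &ck->journal);
        }

        bkey_cached_evict(bc, ck);
        bkey_cached_free(bc, ck);

        /* any other path still pointing at this entry must re-traverse */
        struct btree_path *path2;
        unsigned i;
        trans_for_each_path(trans, path2, i)
                if (path2->l[0].b == (void *) ck) {
                        __bch2_btree_path_unlock(trans, path2);                 /* assumed */
                        btree_path_set_dirty(path2, BTREE_ITER_NEED_TRAVERSE);  /* assumed */
                }
}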
651 struct bkey_cached *ck; in bch2_btree_key_cache_scan() local
686 ck = container_of(pos, struct bkey_cached, hash); in bch2_btree_key_cache_scan()
688 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { in bch2_btree_key_cache_scan()
690 } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) { in bch2_btree_key_cache_scan()
691 clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); in bch2_btree_key_cache_scan()
693 } else if (!bkey_cached_lock_for_evict(ck)) { in bch2_btree_key_cache_scan()
695 } else if (bkey_cached_evict(bc, ck)) { in bch2_btree_key_cache_scan()
696 bkey_cached_free(bc, ck); in bch2_btree_key_cache_scan()
700 six_unlock_write(&ck->c.lock); in bch2_btree_key_cache_scan()
701 six_unlock_intent(&ck->c.lock); in bch2_btree_key_cache_scan()
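The shrinker scan walks the hash table and decides, per entry, whether it can be reclaimed: dirty entries are skipped, recently accessed entries get their accessed bit cleared and survive one more pass, and only clean, unlocked entries are evicted and freed. A hypothetical helper (key_cache_scan_one is an illustrative name) showing that decision chain; the real scan also keeps per-reason counters, omitted here:

static bool key_cache_scan_one(struct btree_key_cache *bc, struct bkey_cached *ck)
{
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                return false;           /* dirty: must be flushed first, skip */

        if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
                /* recently used: clear the bit and give it one more pass */
                clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
                return false;
        }

        if (!bkey_cached_lock_for_evict(ck))
                return false;           /* someone else holds the lock, skip */

        if (bkey_cached_evict(bc, ck)) {
                bkey_cached_free(bc, ck);
                return true;            /* reclaimed one entry */
        }

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
        return false;
}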
747 struct bkey_cached *ck; in bch2_fs_btree_key_cache_exit() local
770 ck = container_of(pos, struct bkey_cached, hash); in bch2_fs_btree_key_cache_exit()
771 BUG_ON(!bkey_cached_evict(bc, ck)); in bch2_fs_btree_key_cache_exit()
772 kfree(ck->k); in bch2_fs_btree_key_cache_exit()
773 kmem_cache_free(bch2_key_cache, ck); in bch2_fs_btree_key_cache_exit()
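At shutdown every remaining entry must evict cleanly (hence the BUG_ON) and is freed synchronously rather than through the rcu_pending machinery; a simplified sketch of the per-bucket teardown loop, with the guard against concurrent rehashing and the rest of the teardown (shrinker, pending lists, the rhashtable itself) omitted:

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct bkey_cached *ck;
        unsigned i;

        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
        for (i = 0; tbl && i < tbl->size; i++)
                /* re-read the bucket head each iteration: the entry is freed immediately */
                while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
                        ck = container_of(pos, struct bkey_cached, hash);
                        BUG_ON(!bkey_cached_evict(bc, ck));
                        kfree(ck->k);
                        kmem_cache_free(bch2_key_cache, ck);
                }
        rcu_read_unlock();
}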