Lines Matching defs:ck

27 const struct bkey_cached *ck = obj;
30 return ck->key.btree_id != key->btree_id ||
31 !bpos_eq(ck->key.pos, key->pos);
43 struct bkey_cached *ck,
46 path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
47 path->l[0].b = (void *) ck;
64 static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
66 if (!six_trylock_intent(&ck->c.lock))
69 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
70 six_unlock_intent(&ck->c.lock);
74 if (!six_trylock_write(&ck->c.lock)) {
75 six_unlock_intent(&ck->c.lock);
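
The bkey_cached_lock_for_evict matches above (source lines 64-75) show the trylock ordering used before an entry can be evicted: intent lock first, give up if the entry is dirty, then the write lock. A sketch of the whole function as those lines suggest, with the return statements and braces between the matches filled in as assumptions:

static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	/* Eviction is opportunistic: never block on the intent lock. */
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	/* Dirty entries still carry unflushed journal state; skip them. */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	/* The write lock excludes readers before the entry is torn down. */
	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}
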
83 struct bkey_cached *ck)
85 bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
88 memset(&ck->key, ~0, sizeof(ck->key));
98 struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);
101 kmem_cache_free(bch2_key_cache, ck);
105 struct bkey_cached *ck)
107 kfree(ck->k);
108 ck->k = NULL;
109 ck->u64s = 0;
111 six_unlock_write(&ck->c.lock);
112 six_unlock_intent(&ck->c.lock);
114 bool pcpu_readers = ck->c.lock.readers != NULL;
115 rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
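
bkey_cached_free_noassert (source lines 105-115) frees the key payload right away but defers freeing the bkey_cached itself: once the locks are dropped, the struct goes onto an rcu_pending list, since RCU walkers that found the entry before it was removed from the hash table may still hold a pointer to it. A commented sketch of that shape, omitting only bookkeeping that does not appear in the matches:

static void bkey_cached_free_noassert(struct btree_key_cache *bc,
				      struct bkey_cached *ck)
{
	/* The key buffer is private to this entry; it can go right away. */
	kfree(ck->k);
	ck->k = NULL;
	ck->u64s = 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);

	/*
	 * The struct itself may still be referenced by RCU readers, so it
	 * is deferred rather than returned to the slab; entries with
	 * per-cpu-reader locks get their own pending list.
	 */
	bool pcpu_readers = ck->c.lock.readers != NULL;
	rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
}
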
121 struct bkey_cached *ck)
129 bkey_cached_free_noassert(bc, ck);
136 struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
137 if (unlikely(!ck))
139 ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
140 if (unlikely(!ck->k)) {
141 kmem_cache_free(bch2_key_cache, ck);
144 ck->u64s = key_u64s;
145 return ck;
156 struct bkey_cached *ck = container_of_or_null(
159 if (ck)
162 ck = allocate_dropping_locks(trans, ret,
165 if (ck)
166 kfree(ck->k);
167 kmem_cache_free(bch2_key_cache, ck);
171 if (ck) {
172 bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
173 ck->c.cached = true;
177 ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
179 if (ck)
182 six_lock_intent(&ck->c.lock, NULL, NULL);
183 six_lock_write(&ck->c.lock, NULL, NULL);
184 return ck;
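
bkey_cached_alloc (source lines 156-184) tries three sources in turn: an entry waiting on the deferred-free (rcu_pending) list, a fresh slab allocation made while btree locks are dropped, and finally a sweep of every CPU's pending list; whichever source wins, the entry is returned with both intent and write locks held. A condensed fragment of that flow follows; the surrounding declarations, the dequeue call elided between lines 156 and 159, and the name __bkey_cached_alloc for the helper at lines 136-145 are assumptions, and error handling is left out:

	/* 1) Reuse an entry that is waiting out its RCU grace period. */
	struct bkey_cached *ck = container_of_or_null(
			rcu_pending_dequeue(&bc->pending[pcpu_readers]),
			struct bkey_cached, rcu);

	/* 2) Otherwise allocate fresh; the allocation may block, so btree
	 *    locks are dropped around it, and the new lock is initialized. */
	if (!ck) {
		ck = allocate_dropping_locks(trans, ret,
				__bkey_cached_alloc(key_u64s, _gfp));
		if (ck) {
			bch2_btree_lock_init(&ck->c,
					pcpu_readers ? SIX_LOCK_INIT_PCPU : 0,
					GFP_KERNEL);
			ck->c.cached = true;
		}
	}

	/* 3) Last resort: pull a deferred entry from any CPU's pending list. */
	if (!ck)
		ck = container_of_or_null(
			rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
			struct bkey_cached, rcu);

	/* The caller always receives the entry intent- and write-locked. */
	if (ck) {
		six_lock_intent(&ck->c.lock, NULL, NULL);
		six_lock_write(&ck->c.lock, NULL, NULL);
	}
	return ck;
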
194 struct bkey_cached *ck;
197 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
198 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
199 bkey_cached_lock_for_evict(ck)) {
200 if (bkey_cached_evict(c, ck))
201 return ck;
202 six_unlock_write(&ck->c.lock);
203 six_unlock_intent(&ck->c.lock);
231 struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s);
232 int ret = PTR_ERR_OR_ZERO(ck);
236 if (unlikely(!ck)) {
237 ck = bkey_cached_reuse(bc);
238 if (unlikely(!ck)) {
245 ck->c.level = 0;
246 ck->c.btree_id = ck_path->btree_id;
247 ck->key.btree_id = ck_path->btree_id;
248 ck->key.pos = ck_path->pos;
249 ck->flags = 1U << BKEY_CACHED_ACCESSED;
251 if (unlikely(key_u64s > ck->u64s)) {
258 bch2_btree_id_str(ck->key.btree_id), key_u64s);
265 kfree(ck->k);
266 ck->k = new_k;
267 ck->u64s = key_u64s;
270 bkey_reassemble(ck->k, k);
276 ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);
284 six_unlock_write(&ck->c.lock);
288 six_lock_downgrade(&ck->c.lock);
289 btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want);
293 bkey_cached_free(trans, bc, ck);
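
btree_key_cache_create (source lines 231-293) is the slow path that materialises a cache entry: it stamps the entry with the path's btree_id and pos, copies in the current key, and only then publishes it in the hash table, dropping the write lock and downgrading once the insert has succeeded. A paraphrased fragment of the success path, with declarations and the key-buffer resize at lines 251-267 elided:

	/* Stamp identity before the entry becomes visible to lookups. */
	ck->c.level = 0;
	ck->c.btree_id = ck_path->btree_id;
	ck->key.btree_id = ck_path->btree_id;
	ck->key.pos = ck_path->pos;
	ck->flags = 1U << BKEY_CACHED_ACCESSED;

	/* Copy the current version of the key into the cached buffer. */
	bkey_reassemble(ck->k, k);

	/*
	 * Publish the entry; a nonzero return means another path inserted
	 * the same key first, and our copy is freed instead (line 293).
	 */
	ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash,
					    bch2_btree_key_cache_params);
	if (!ret) {
		six_unlock_write(&ck->c.lock);
		six_lock_downgrade(&ck->c.lock);
		btree_path_cached_set(trans, ck_path, ck,
				      (enum btree_node_locked_type) lock_want);
	}
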
362 struct bkey_cached *ck;
365 ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
366 if (!ck)
371 int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
375 if (ck->key.btree_id != path->btree_id ||
376 !bpos_eq(ck->key.pos, path->pos)) {
377 six_unlock_type(&ck->c.lock, lock_want);
381 if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
382 set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
384 btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
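
The fast lookup path (source lines 362-384) is a find-then-lock-then-recheck pattern: the hash lookup happens before the node lock is taken, so once the lock is held the entry's btree_id and pos have to be compared again, and a mismatch (the entry was evicted and reused for another key in the meantime) means unlock and retry. Sketched below; the retry label, return values, and declarations between the matches are assumptions:

retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck)
		return -ENOENT;	/* caller falls back to creating the entry */

	ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
	if (ret)
		return ret;

	/*
	 * The entry was found before its lock was held; it may have been
	 * evicted and recycled for a different key in the meantime.
	 */
	if (ck->key.btree_id != path->btree_id ||
	    !bpos_eq(ck->key.pos, path->pos)) {
		six_unlock_type(&ck->c.lock, lock_want);
		goto retry;
	}

	/* Second-chance bit consumed by the shrinker (lines 667-717). */
	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	btree_path_cached_set(trans, path, ck,
			      (enum btree_node_locked_type) lock_want);
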
427 struct bkey_cached *ck = NULL;
443 ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
444 if (!ck)
447 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
453 if (journal_seq && ck->journal.seq != journal_seq)
456 trans->journal_res.seq = ck->journal.seq;
464 if (ck->journal.seq == journal_last_seq(j))
467 if (ck->journal.seq != journal_last_seq(j) ||
479 ret = bch2_trans_update(trans, &b_iter, ck->k,
496 bch2_journal_pin_drop(j, &ck->journal);
502 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
503 clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
514 bch2_btree_node_lock_write_nofail(trans, path, &ck->c);
516 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
517 clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
522 if (bkey_cached_evict(&c->btree_key_cache, ck)) {
523 bkey_cached_free(trans, &c->btree_key_cache, ck);
525 six_unlock_write(&ck->c.lock);
526 six_unlock_intent(&ck->c.lock);
539 struct bkey_cached *ck =
546 btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
547 key = ck->key;
549 if (ck->journal.seq != seq ||
550 !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
551 six_unlock_read(&ck->c.lock);
555 if (ck->seq != seq) {
556 bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
558 six_unlock_read(&ck->c.lock);
561 six_unlock_read(&ck->c.lock);
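
The journal-pin flush callback (source lines 539-561) only commits to flushing after re-validating the entry under a read lock: it bails out if the pin's sequence no longer matches or the entry is already clean, and if the entry was redirtied at a newer sequence it just moves the pin forward instead of flushing now. The decision ladder, roughly; the container_of on the pin is assumed for the elided line, and the actual flush call and SRCU bookkeeping are left out:

	struct bkey_cached *ck = container_of(pin, struct bkey_cached, journal);

	btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
	struct bkey_cached_key key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		/* Already flushed, or evicted and reused: nothing to do. */
		six_unlock_read(&ck->c.lock);
		return 0;
	}

	if (ck->seq != seq) {
		/* Redirtied at a newer seq: move the pin, flush later. */
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		return 0;
	}

	six_unlock_read(&ck->c.lock);
	/* ... flush `key` back to the underlying btree ... */
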
578 struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
582 BUG_ON(insert->k.u64s > ck->u64s);
584 bkey_copy(ck->k, insert);
586 if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
588 set_bit(BKEY_CACHED_DIRTY, &ck->flags);
608 !journal_pin_active(&ck->journal)) {
609 ck->seq = trans->journal_res.seq;
612 &ck->journal, bch2_btree_key_cache_journal_flush);
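
The insert path (source lines 578-612) never touches the underlying btree: it copies the key into the cached buffer, marks the entry dirty, and pins the journal sequence the update went into, naming bch2_btree_key_cache_journal_flush as the pin's flush callback so the journal can force writeback later. In outline; the pin call between lines 609 and 612 is assumed to be bch2_journal_pin_add, and the accounting between the matched lines is omitted:

	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;

	/* The cached buffer was sized when the entry was created. */
	BUG_ON(insert->k.u64s > ck->u64s);
	bkey_copy(ck->k, insert);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		/* First modification since the last flush. */
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
	}

	/*
	 * Pin the journal entry this update went into; the pin's flush
	 * callback (lines 539-561) is what eventually writes the key back
	 * to the btree proper.
	 */
	if (!journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
				     &ck->journal,
				     bch2_btree_key_cache_journal_flush);
	}
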
624 struct bkey_cached *ck = (void *) path->l[0].b;
630 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
631 clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
633 bch2_journal_pin_drop(&c->journal, &ck->journal);
636 bkey_cached_evict(bc, ck);
637 bkey_cached_free(trans, bc, ck);
644 if (path2->l[0].b == (void *) ck) {
667 struct bkey_cached *ck;
702 ck = container_of(pos, struct bkey_cached, hash);
704 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
706 } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
707 clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
709 } else if (!bkey_cached_lock_for_evict(ck)) {
711 } else if (bkey_cached_evict(bc, ck)) {
712 bkey_cached_free_noassert(bc, ck);
716 six_unlock_write(&ck->c.lock);
717 six_unlock_intent(&ck->c.lock);
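
The shrinker scan (source lines 667-717) is a second-chance policy over the hash table: dirty entries are skipped, a set BKEY_CACHED_ACCESSED bit buys one reprieve (the bit is cleared rather than evicting), and only entries that are clean, untouched since the last pass, and trylock-able get evicted and freed. The loop body, roughly, with the per-case counters omitted:

		ck = container_of(pos, struct bkey_cached, hash);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			/* Dirty: still pinned by the journal, cannot reclaim. */
		} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
			/* Recently used: clear the bit, reclaim on a later pass. */
			clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
		} else if (!bkey_cached_lock_for_evict(ck)) {
			/* Someone else holds it; leave it for now. */
		} else if (bkey_cached_evict(bc, ck)) {
			bkey_cached_free_noassert(bc, ck);
		} else {
			/* Removal raced with someone else; just drop the locks. */
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
		}
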
763 struct bkey_cached *ck;
786 ck = container_of(pos, struct bkey_cached, hash);
787 BUG_ON(!bkey_cached_evict(bc, ck));
788 kfree(ck->k);
789 kmem_cache_free(bch2_key_cache, ck);