/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period before removed objects can be freed.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized by a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold; resizing is done concurrently with readers.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash-pointer pairs, with the u32 hashes stored in full.
 * Having this structure instead of directly chaining items has two
 * advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map;
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket locks (so that no writer can race with
 * the resize) and copying all entries into a new map. Then, the new
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock; if the pointers differ, a resize happened in
 * between and the writer must retry on the new map.
 *
 * Related work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency: The Secret to
 *   Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would be very slow, since RCU read-critical
 *   sections in QEMU can be long.
 */

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines, so we go with that for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * false sharing.
 */
#else /* 64-bit */
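/*
 * Worked sizing example (editor's sketch, not part of qht.c): assuming a
 * 64-bit host, 4 entries per bucket, and 4-byte spinlock and seqlock
 * fields, a bucket packs into exactly one 64-byte cache line:
 *
 *   lock (4) + sequence (4) + 4 * 4-byte hashes + 4 * 8-byte pointers
 *   + 8-byte next pointer = 64 bytes
 */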
enum qht_iter_type {
    /* ... */
    QHT_ITER_RM, /* remove element if retbool returns true */
};
/* in qht_lock(): */
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
/* in qht_trylock(): */
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&(ht)->lock);
    }
    return qemu_mutex_trylock(&(ht)->lock);
/* in qht_unlock(): */
    qemu_mutex_unlock(&ht->lock);
/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with qatomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in qatomic_read, since otherwise the compiler
 * might refetch the pointer.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
/*
 * struct qht_map - structure to track an array of buckets
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 */
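/*
 * Editor's sketch of the fields the comment above describes; illustrative,
 * not verbatim from qht.c:
 */
struct qht_map_sketch {
    struct rcu_head rcu;               /* reclaimed via RCU after a resize */
    struct qht_bucket *buckets;        /* array of head buckets */
    size_t n_buckets;                  /* always a power of two */
    size_t n_added_buckets;            /* number of non-head buckets */
    size_t n_added_buckets_threshold;  /* resize up once this is surpassed */
};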
/* in qht_bucket_debug__locked(): */
            if (b->pointers[i] == NULL) {
                /* ... */
            }
            /* ... */
                        __func__, b, i, b->hashes[i], b->pointers[i]);
            /* ... */
        b = b->next;
/* in qht_map_debug__all_locked(): */
    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
/* in qht_do_if_first_in_stripe(): */
#ifdef CONFIG_TSAN
    unsigned long bucket_idx = b - map->buckets;
    /* ... */
    unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
    func(&map->tsan_bucket_locks[lock_idx].lock);
    /* ... */
#else
    func(&b->lock);
#endif
/* in qht_bucket_lock_do(): */
#ifdef CONFIG_TSAN
    unsigned long bucket_idx = b - map->buckets;
    unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
    func(&map->tsan_bucket_locks[lock_idx].lock);
#else
    func(&b->lock);
#endif
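/*
 * Editor's note: the mask maps many buckets onto a small, fixed pool of
 * locks. E.g. with QHT_TSAN_BUCKET_LOCKS == 16 (value assumed), buckets
 * 0, 16, 32, ... all share lock 0, which bounds the number of distinct
 * locks TSAN has to track.
 */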
/* in qht_head_init(): */
    seqlock_init(&b->sequence);
/* in qht_map_to_bucket(): */
    return &map->buckets[hash & (map->n_buckets - 1)];
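/*
 * The mask above only works because n_buckets is a power of two. Editor's
 * sketch of the elements-to-buckets conversion, assuming pow2ceil() from
 * qemu/host-utils.h:
 */
static inline size_t qht_elems_to_buckets_sketch(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}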
/* in qht_map_lock_buckets(): */
    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];
        /* ... */
    }
/* in qht_map_unlock_buckets(): */
    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];
        /* ... */
    }
/* in qht_map_is_stale__locked(): */
    return map != ht->map;
/*
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
/* in qht_map_lock_buckets__no_stale(): */
    map = qatomic_rcu_read(&ht->map);
    /* ... */
    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
/* Note: callers cannot have ht->lock held. */
/* in qht_bucket_lock__no_stale(): */
    map = qatomic_rcu_read(&ht->map);
    /* ... */
    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
/* in qht_map_needs_resize(): */
    return qatomic_read(&map->n_added_buckets) >
           map->n_added_buckets_threshold;
/* in qht_chain_destroy(): */
    struct qht_bucket *curr = head->next;
    /* ... */
        curr = curr->next;
    /* ... */
/* in qht_map_destroy(): */
    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(map, &map->buckets[i]);
    }
    qemu_vfree(map->buckets);
/* in qht_map_create(): */
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        /* ... (ratio constant elided) ... */;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < map->n_buckets; i++) {
        qht_head_init(map, &map->buckets[i]);
    }
/* in qht_init(): */
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    /* ... */
    qatomic_rcu_set(&ht->map, map);
/* in qht_destroy(): */
    qht_map_destroy(ht->map);
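/*
 * Editor's usage sketch (not part of qht.c): how a caller might drive the
 * public API from include/qemu/qht.h. The element type, hash choice and
 * cmp callback are hypothetical; qemu_xxhash2() is from qemu/xxhash.h.
 */
struct qht_example_entry {
    uint64_t key;
};

static bool qht_example_cmp(const void *obj, const void *userp)
{
    const struct qht_example_entry *a = obj;
    const struct qht_example_entry *b = userp;

    return a->key == b->key;
}

static void qht_example_usage(void)
{
    struct qht ht;
    struct qht_example_entry e = { .key = 42 };
    void *existing;

    qht_init(&ht, qht_example_cmp, 1 << 16, QHT_MODE_AUTO_RESIZE);
    if (!qht_insert(&ht, &e, qemu_xxhash2(e.key), &existing)) {
        /* an equivalent entry was already present; see @existing */
    }
    if (!qht_remove(&ht, &e, qemu_xxhash2(e.key))) {
        /* entry was not in the table */
    }
    qht_destroy(&ht);
}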
/* in qht_bucket_reset__locked(): */
    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            qatomic_set(&b->hashes[i], 0);
            qatomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
/* in qht_map_reset__all_locked(): */
    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
/* in qht_reset_size(): */
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        /* ... */
    }
/* in qht_do_lookup(): */
            if (qatomic_read(&b->hashes[i]) == hash) {
                /* ... */
                void *p = qatomic_rcu_read(&b->pointers[i]);
                /* ... */
            }
        /* ... */
        b = qatomic_rcu_read(&b->next);
/* in qht_lookup__slowpath(): */
    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
/* in qht_lookup_custom(): */
    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fast path gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
/* in qht_lookup(): */
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
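/*
 * Editor's sketch: lookups must run inside an RCU read-side critical
 * section, so that a map or bucket freed by a concurrent resize stays
 * alive while we read it. Types reuse the hypothetical example above;
 * rcu_read_lock()/rcu_read_unlock() are from qemu/rcu.h.
 */
static bool qht_example_contains(struct qht *ht, uint64_t key)
{
    struct qht_example_entry needle = { .key = key };
    void *p;

    rcu_read_lock();
    p = qht_lookup(ht, &needle, qemu_xxhash2(key));
    /* p, if found, is safe to use until rcu_read_unlock() */
    rcu_read_unlock();
    return p != NULL;
}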
/*
 * Call with head->lock held.
 * @ht is const since it is only used for ht->cmp()
 */
/* in qht_insert__locked(): */
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    /* chain is full: allocate and zero a new (non-head) bucket */
    /* ... */
    qatomic_inc(&map->n_added_buckets);
    /* ... */

 found:
    seqlock_write_begin(&head->sequence);
    if (new) {
        qatomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin */
    qatomic_set(&b->hashes[i], hash);
    qatomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
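/*
 * Editor's note on the write side above: publishing the new bucket link,
 * hash and pointer inside the seqlock write section lets lockless readers
 * detect that they raced with this insert and retry, while qatomic_set()
 * keeps each individual store single-copy-atomic.
 */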
/* in qht_grow_maybe(): */
    map = ht->map;
    /* ... */
        struct qht_map *new = qht_map_create(map->n_buckets * 2);
    /* ... */

/* in qht_insert(): */
    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
/* in qht_entry_is_last(): */
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
/* in qht_entry_move(): */
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    qatomic_set(&to->hashes[i], from->hashes[j]);
    qatomic_set(&to->pointers[i], from->pointers[j]);

    qatomic_set(&from->hashes[j], 0);
    qatomic_set(&from->pointers[j], NULL);
/* in qht_bucket_remove_entry(): */
    if (qht_entry_is_last(orig, pos)) {
        qatomic_set(&orig->hashes[pos], 0);
        qatomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos]: move the last one into its place */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
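/*
 * Worked example (editor's, QHT_BUCKET_ENTRIES == 4 assumed): removing P1
 * from a bucket whose last valid entry is P3:
 *
 *   before: head [P0 P1 P2 P3]   ->   after: head [P0 P3 P2 --]
 *
 * The last valid entry is moved into the freed slot, so readers can keep
 * stopping at the first NULL pointer they find.
 */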
/* call with b->lock held */
/* in qht_remove__locked(): */
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
/* in qht_bucket_iter(): */
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    /* reexamine the entry that was just moved into slot i */
                    i--;
                }
                break;
            }
        /* ... */
        b = b->next;
/* in qht_map_iter__all_locked(): */
    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
/* in do_qht_iter(): */
    map = qatomic_rcu_read(&ht->map);
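/*
 * Editor's usage sketch for the two iteration flavours declared in
 * include/qemu/qht.h; the callbacks are hypothetical.
 */
static void qht_example_count(void *p, uint32_t hash, void *userp)
{
    size_t *count = userp;

    (*count)++;
}

static bool qht_example_drop_all(void *p, uint32_t hash, void *userp)
{
    return true; /* returning true removes the entry (QHT_ITER_RM) */
}

static void qht_example_iterate(struct qht *ht)
{
    size_t count = 0;

    qht_iter(ht, qht_example_count, &count);         /* visit every entry */
    qht_iter_remove(ht, qht_example_drop_all, NULL); /* empty the table */
}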
/* in qht_map_copy(): */
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    /* ... */
    /* no need to acquire b->lock because no thread has seen this map yet */
/* Call with ht->lock held. */
/* in qht_do_resize_reset(): */
    old = ht->map;
    /* ... */
    g_assert(new->n_buckets != old->n_buckets);
    /* ... */
    qatomic_rcu_set(&ht->map, new);
/* in qht_resize(): */
    if (n_buckets != ht->map->n_buckets) {
        /* ... */
    }
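/*
 * Editor's sketch: an explicit resize, e.g. ahead of a known load spike.
 * Consistent with the check above, qht_resize() returns false when the
 * table already has the resulting bucket count (size chosen arbitrarily).
 */
static void qht_example_grow(struct qht *ht)
{
    if (!qht_resize(ht, 1 << 20)) {
        /* no resize was needed */
    }
}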
/* in qht_statistics_init(): */
    map = qatomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        /* ... */
        do {
            version = seqlock_read_begin(&head->sequence);
            /* ... walk the chain, counting buckets and entries ... */
                    if (qatomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                /* ... */
                b = qatomic_rcu_read(&b->next);
            /* ... */
        } while (seqlock_read_retry(&head->sequence, version));

        if (buckets != 0) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
/* in qht_statistics_destroy(): */
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
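/*
 * Editor's usage sketch: gathering and reporting table statistics.
 * qdist_avg() is from qemu/qdist.h.
 */
static void qht_example_report(struct qht *ht)
{
    struct qht_stats stats;

    qht_statistics_init(ht, &stats);
    printf("entries: %zu, avg chain length: %0.3f, avg occupancy: %0.3f\n",
           stats.entries, qdist_avg(&stats.chain),
           qdist_avg(&stats.occupancy));
    qht_statistics_destroy(&stats);
}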