Lines Matching full:hash
2 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
11 * - Trying to insert an already-existing hash-pointer pair is OK. However,
12 * it is not OK to insert into the same hash table different hash-pointer
24 * - Optional auto-resizing: the hash table resizes up if the load surpasses
29 * contain a few hash values and pointers; the u32 hash values are stored in
33 * - Resizing the hash table with concurrent lookups is easy.
47 * race with us) and then copying all entries into a new hash map. Then, the
59 * - Why not RCU-based hash tables? They would allow us to get rid of the
62 * More info on relativistic hash tables:
63 * + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
65 * + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
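The design notes above (lines 24-33) describe buckets that hold a handful of u32 hash values next to the corresponding pointers, so a lookup can compare hashes without dereferencing every pointer. A minimal sketch of such a bucket layout follows; the entry count and field names are assumptions for illustration, not the actual struct qht_bucket, which additionally embeds per-bucket locking state.

    /* Sketch of a small, cache-friendly bucket: a few u32 hashes stored
     * alongside their pointers, plus an overflow chain. Entry count and
     * names are assumptions; the real qht_bucket also carries a spinlock
     * and a sequence counter for concurrent readers. */
    #include <stdint.h>

    #define SKETCH_BUCKET_ENTRIES 4

    struct sketch_bucket {
        uint32_t hashes[SKETCH_BUCKET_ENTRIES];
        void *pointers[SKETCH_BUCKET_ENTRIES];
        struct sketch_bucket *next;     /* chained non-head bucket, if any */
    };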
218 fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n", in qht_bucket_debug__locked()
306 struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash) in qht_map_to_bucket() argument
308 return &map->buckets[hash & (map->n_buckets - 1)]; in qht_map_to_bucket()
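qht_map_to_bucket() selects a bucket by masking the hash with n_buckets - 1, which is equivalent to a modulo only when n_buckets is a power of two. A self-contained illustration of that invariant (the helper name is hypothetical):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    /* hash & (n_buckets - 1) equals hash % n_buckets only when
     * n_buckets is a non-zero power of two. */
    static size_t toy_bucket_index(uint32_t hash, size_t n_buckets)
    {
        assert(n_buckets != 0 && (n_buckets & (n_buckets - 1)) == 0);
        return hash & (n_buckets - 1);
    }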
381 struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash, in qht_bucket_lock__no_stale() argument
388 b = qht_map_to_bucket(map, hash); in qht_bucket_lock__no_stale()
400 b = qht_map_to_bucket(map, hash); in qht_bucket_lock__no_stale()
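qht_bucket_lock__no_stale() computes the bucket twice (lines 388 and 400), which hints at the pattern: lock the bucket found through the current map, then verify that a concurrent resize has not replaced the map; if it has, fall back to the table lock and redo the lookup in the new map. Below is a hedged, self-contained toy of that lock-then-validate pattern using pthread mutexes and an atomic map pointer; names, types, and locking details are assumptions, not the qht.c implementation.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    struct toy_bucket {
        pthread_mutex_t lock;
        /* entries omitted */
    };

    struct toy_map {
        struct toy_bucket *buckets;
        size_t n_buckets;                   /* power of two */
    };

    struct toy_ht {
        pthread_mutex_t lock;               /* serializes resizes */
        struct toy_map *_Atomic map;        /* swapped atomically on resize */
    };

    static struct toy_bucket *toy_map_to_bucket(struct toy_map *map, uint32_t hash)
    {
        return &map->buckets[hash & (map->n_buckets - 1)];
    }

    static struct toy_bucket *toy_bucket_lock_no_stale(struct toy_ht *ht,
                                                       uint32_t hash,
                                                       struct toy_map **pmap)
    {
        struct toy_map *map = atomic_load(&ht->map);
        struct toy_bucket *b = toy_map_to_bucket(map, hash);

        pthread_mutex_lock(&b->lock);
        if (atomic_load(&ht->map) == map) { /* no resize raced with us */
            *pmap = map;
            return b;
        }
        pthread_mutex_unlock(&b->lock);

        /* a resize replaced the map: serialize against resizers, then lock
         * the corresponding bucket in the up-to-date map */
        pthread_mutex_lock(&ht->lock);
        map = atomic_load(&ht->map);
        b = toy_map_to_bucket(map, hash);
        pthread_mutex_lock(&b->lock);
        pthread_mutex_unlock(&ht->lock);

        *pmap = map;
        return b;
    }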
451 /* let tiny hash tables add at least one non-head bucket */ in qht_map_create()
556 const void *userp, uint32_t hash) in qht_do_lookup() argument
563 if (qatomic_read(&b->hashes[i]) == hash) { in qht_do_lookup()
583 const void *userp, uint32_t hash) in qht_lookup__slowpath() argument
590 ret = qht_do_lookup(b, func, userp, hash); in qht_lookup__slowpath()
595 void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash, in qht_lookup_custom() argument
604 b = qht_map_to_bucket(map, hash); in qht_lookup_custom()
607 ret = qht_do_lookup(b, func, userp, hash); in qht_lookup_custom()
615 return qht_lookup__slowpath(b, func, userp, hash); in qht_lookup_custom()
618 void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash) in qht_lookup() argument
620 return qht_lookup_custom(ht, userp, hash, ht->cmp); in qht_lookup()
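The lookup fragments show a fast path in qht_lookup_custom() that falls back to qht_lookup__slowpath(); in qht.c the per-bucket scan is protected by a seqlock, so the fast path performs one optimistic pass and only retries if a writer raced with it. A simplified, self-contained sketch of that optimistic-read pattern (the seqlock here is a toy with simplified memory ordering; QEMU uses its own seqlock implementation):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_seqlock { _Atomic unsigned sequence; };

    /* Wait out writers (odd sequence values), then remember the sequence. */
    static unsigned toy_read_begin(struct toy_seqlock *sl)
    {
        unsigned s;
        while ((s = atomic_load(&sl->sequence)) & 1) {
            /* a writer is in progress; spin */
        }
        return s;
    }

    /* The read was consistent iff the sequence did not change meanwhile. */
    static bool toy_read_retry(struct toy_seqlock *sl, unsigned start)
    {
        atomic_thread_fence(memory_order_acquire);
        return atomic_load(&sl->sequence) != start;
    }

    /* Fast path: one optimistic pass. Slow path: loop until consistent. */
    static void *toy_lookup(struct toy_seqlock *bucket_seq,
                            void *(*do_lookup)(void *opaque), void *opaque)
    {
        unsigned version = toy_read_begin(bucket_seq);
        void *ret = do_lookup(opaque);

        if (!toy_read_retry(bucket_seq, version)) {
            return ret;
        }
        do {
            version = toy_read_begin(bucket_seq);
            ret = do_lookup(opaque);
        } while (toy_read_retry(bucket_seq, version));
        return ret;
    }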
628 struct qht_bucket *head, void *p, uint32_t hash, in qht_insert__locked() argument
639 if (unlikely(b->hashes[i] == hash && in qht_insert__locked()
667 qatomic_set(&b->hashes[i], hash); in qht_insert__locked()
694 bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing) in qht_insert() argument
704 b = qht_bucket_lock__no_stale(ht, hash, &map); in qht_insert()
705 prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize); in qht_insert()
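From these fragments, qht_insert() takes the object, its precomputed 32-bit hash, and an optional **existing out-parameter, while qht_insert__locked() treats a matching stored hash combined with the table's cmp callback as "already present" (consistent with line 11: re-inserting an existing pair is OK). A hedged usage sketch follows; it assumes a QEMU translation unit for the includes, and the element type plus the choice of qemu_xxhash2() as hash function are invented for illustration.

    /* Usage sketch only: assumes QEMU headers are available; struct entry
     * and the hash choice are illustrative, not from qht.c. */
    #include "qemu/osdep.h"
    #include "qemu/qht.h"
    #include "qemu/xxhash.h"

    struct entry {
        uint64_t key;
        /* ... payload ... */
    };

    static void entry_insert(struct qht *ht, struct entry *e)
    {
        uint32_t hash = qemu_xxhash2(e->key);   /* assumed hash helper */
        void *existing;

        if (!qht_insert(ht, e, hash, &existing)) {
            /* an entry that ht->cmp() considers equal is already in the
             * table; "existing" points to it and "e" was not inserted */
        }
    }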
781 bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash) in qht_remove__locked() argument
794 qht_debug_assert(b->hashes[i] == hash); in qht_remove__locked()
806 bool qht_remove(struct qht *ht, const void *p, uint32_t hash) in qht_remove() argument
815 b = qht_bucket_lock__no_stale(ht, hash, &map); in qht_remove()
816 ret = qht_remove__locked(b, p, hash); in qht_remove()
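qht_remove() takes the same pointer/hash pair that was inserted; the debug assertion at line 794 checks that the stored hash matches, so the caller must recompute (or have kept) the insert-time hash. Continuing the usage sketch above, with the same assumptions:

    static void entry_remove(struct qht *ht, struct entry *e)
    {
        uint32_t hash = qemu_xxhash2(e->key);   /* must equal the insert-time hash */

        if (!qht_remove(ht, e, hash)) {
            /* "e" was not found in the table */
        }
    }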
905 static void qht_map_copy(void *p, uint32_t hash, void *userp) in qht_map_copy() argument
910 struct qht_bucket *b = qht_map_to_bucket(new, hash); in qht_map_copy()
913 qht_insert__locked(ht, new, b, p, hash, NULL); in qht_map_copy()
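The resize path (qht_map_copy, lines 905-913) re-inserts each entry into the new map via qht_insert__locked(), reusing the hash that is already stored in the bucket, so only the bucket index changes when the bucket array grows. A simplified, self-contained model of that per-entry copy step; the types are assumptions, and overflow chaining is reduced to an assertion for brevity.

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Toy model: re-insert an entry into the new map by recomputing the
     * bucket index from the already-stored u32 hash. The real code walks
     * the old map and calls qht_insert__locked() into the new one, and
     * chains extra non-head buckets instead of asserting when one fills up. */
    #define COPY_BUCKET_ENTRIES 4

    struct copy_bucket {
        uint32_t hashes[COPY_BUCKET_ENTRIES];
        void *pointers[COPY_BUCKET_ENTRIES];
    };

    struct copy_map {
        struct copy_bucket *buckets;
        size_t n_buckets;               /* power of two */
    };

    static void copy_one_entry(struct copy_map *new_map, void *p, uint32_t hash)
    {
        struct copy_bucket *b = &new_map->buckets[hash & (new_map->n_buckets - 1)];

        for (size_t i = 0; i < COPY_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                b->hashes[i] = hash;
                b->pointers[i] = p;
                return;
            }
        }
        assert(!"bucket full; the real code allocates a chained bucket here");
    }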