xref: /src/sys/kern/subr_hash.c (revision 411c28b6caa4a55d89cc34d77b33141b78ced590)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2026 Gleb Smirnoff <glebius@FreeBSD.org>
5  * Copyright (c) 1982, 1986, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/malloc.h>
41 #include <sys/ck.h>
42 #include <sys/queue.h>
43 #include <sys/mutex.h>
44 #include <sys/rmlock.h>
45 #include <sys/rwlock.h>
46 #include <sys/sx.h>
47 #include <sys/hash.h>
48 
/*
 * hashalloc() lays out every hash table slot as a list head immediately
 * followed by an optional lock: the lock lives at offset sizeof(head)
 * inside a slot of hdrsize bytes (see hashalloc_sizes()).  That layout is
 * only correct if a structure containing { head; lock; } has no internal
 * padding between the two members.  Assert this at compile time for every
 * supported combination of list head and lock type.
 */
#define	ASSERT_NOPAD(head, lock)	_Static_assert(			\
    sizeof(head ## _HEAD(, foo)) + sizeof(struct lock) ==		\
    sizeof(struct { head ## _HEAD(, foo) h; struct lock l; }),		\
    "Structure of " #head "_HEAD and " #lock " has padding")
ASSERT_NOPAD(LIST, mtx);
ASSERT_NOPAD(CK_LIST, mtx);
ASSERT_NOPAD(SLIST, mtx);
ASSERT_NOPAD(CK_SLIST, mtx);
ASSERT_NOPAD(STAILQ, mtx);
ASSERT_NOPAD(CK_STAILQ, mtx);
ASSERT_NOPAD(TAILQ, mtx);
ASSERT_NOPAD(LIST, rwlock);
ASSERT_NOPAD(CK_LIST, rwlock);
ASSERT_NOPAD(SLIST, rwlock);
ASSERT_NOPAD(CK_SLIST, rwlock);
ASSERT_NOPAD(STAILQ, rwlock);
ASSERT_NOPAD(CK_STAILQ, rwlock);
ASSERT_NOPAD(TAILQ, rwlock);
ASSERT_NOPAD(LIST, sx);
ASSERT_NOPAD(CK_LIST, sx);
ASSERT_NOPAD(SLIST, sx);
ASSERT_NOPAD(CK_SLIST, sx);
ASSERT_NOPAD(STAILQ, sx);
ASSERT_NOPAD(CK_STAILQ, sx);
ASSERT_NOPAD(TAILQ, sx);
ASSERT_NOPAD(LIST, rmlock);
ASSERT_NOPAD(CK_LIST, rmlock);
ASSERT_NOPAD(SLIST, rmlock);
ASSERT_NOPAD(CK_SLIST, rmlock);
ASSERT_NOPAD(STAILQ, rmlock);
ASSERT_NOPAD(CK_STAILQ, rmlock);
ASSERT_NOPAD(TAILQ, rmlock);
ASSERT_NOPAD(LIST, rmslock);
ASSERT_NOPAD(CK_LIST, rmslock);
ASSERT_NOPAD(SLIST, rmslock);
ASSERT_NOPAD(CK_SLIST, rmslock);
ASSERT_NOPAD(STAILQ, rmslock);
ASSERT_NOPAD(CK_STAILQ, rmslock);
ASSERT_NOPAD(TAILQ, rmslock);
#undef ASSERT_NOPAD
89 
90 static inline void
hashalloc_sizes(struct hashalloc_args * args,size_t * hdrsize,size_t * loffset)91 hashalloc_sizes(struct hashalloc_args *args, size_t *hdrsize, size_t *loffset)
92 {
93 	switch (args->head) {
94 	case HASH_HEAD_LIST:
95 		*loffset = sizeof(LIST_HEAD(, foo));
96 		break;
97 	case HASH_HEAD_CK_LIST:
98 		*loffset = sizeof(CK_LIST_HEAD(, foo));
99 		break;
100 	case HASH_HEAD_SLIST:
101 		*loffset = sizeof(SLIST_HEAD(, foo));
102 		break;
103 	case HASH_HEAD_CK_SLIST:
104 		*loffset = sizeof(CK_SLIST_HEAD(, foo));
105 		break;
106 	case HASH_HEAD_STAILQ:
107 		*loffset = sizeof(STAILQ_HEAD(, foo));
108 		break;
109 	case HASH_HEAD_CK_STAILQ:
110 		*loffset = sizeof(CK_STAILQ_HEAD(, foo));
111 		break;
112 	case HASH_HEAD_TAILQ:
113 		*loffset = sizeof(TAILQ_HEAD(, foo));
114 		break;
115 	}
116 
117 	switch (args->lock) {
118 	case HASH_LOCK_NONE:
119 		*hdrsize = *loffset;
120 		break;
121 	case HASH_LOCK_MTX:
122 		*hdrsize = *loffset + sizeof(struct mtx);
123 		break;
124 	case HASH_LOCK_RWLOCK:
125 		*hdrsize = *loffset + sizeof(struct rwlock);
126 		break;
127 	case HASH_LOCK_SX:
128 		*hdrsize = *loffset + sizeof(struct sx);
129 		break;
130 	case HASH_LOCK_RMLOCK:
131 		*hdrsize = *loffset + sizeof(struct rmlock);
132 		break;
133 	case HASH_LOCK_RMSLOCK:
134 		*hdrsize = *loffset + sizeof(struct rmslock);
135 		break;
136 	}
137 
138 	if (args->hdrsize > 0) {
139 		MPASS(args->hdrsize >= *hdrsize);
140 		*hdrsize = args->hdrsize;
141 	} else
142 		args->hdrsize = *hdrsize;
143 }
144 
/*
 * Allocate and initialize a hash table described by 'args'.
 *
 * The requested args->size is rounded down: to the largest power of two
 * not exceeding it (HASH_TYPE_POWER2), or to the largest entry of the
 * static prime table not exceeding it (HASH_TYPE_PRIME).  Each bucket
 * slot consists of a list head optionally followed by a per-bucket lock;
 * every slot's head and lock are initialized, and args->ctor, if set, is
 * invoked on each slot.
 *
 * On success the actual bucket count is written back to args->size and a
 * pointer to the table is returned.  On failure args->error is set
 * (ENOMEM, or the constructor's error), already-constructed slots are
 * torn down via hashfree(), and NULL is returned.
 */
void *
hashalloc(struct hashalloc_args *args)
{
	static const int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021,
	    1531, 2039, 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
	    7159, 7673, 8191, 12281, 16381, 24571, 32749 };
	void *mem;
	size_t size, hdrsize, loffset;
	u_int i;

	MPASS(args->version == 0);
	MPASS(args->size > 0);

	switch (args->type) {
	case HASH_TYPE_POWER2:
		/* Largest power of two <= args->size. */
		for (size = 1; size <= args->size; size <<= 1)
			continue;
		size >>= 1;
		break;
	case HASH_TYPE_PRIME:
		/* Largest table prime <= args->size. */
		for (i = nitems(primes) - 1; args->size < primes[i]; i--)
			;
		size = primes[i];
		break;
	}

	hashalloc_sizes(args, &hdrsize, &loffset);

	mem = malloc(size * hdrsize, args->mtype, args->mflags);
	if (mem == NULL) {
		args->error = ENOMEM;
		return (NULL);
	}

	/*
	 * Unless malloc() zeroed the buffer (M_ZERO), the lock storage
	 * holds garbage; add the per-primitive *_NEW flag so lock
	 * initialization does not interpret the existing contents.
	 */
	switch (args->lock) {
	case HASH_LOCK_NONE:
		break;
	case HASH_LOCK_MTX:
		MPASS(args->lname != NULL);
		if ((args->mflags & M_ZERO) == 0)
			args->lopts |= MTX_NEW;
		break;
	case HASH_LOCK_RWLOCK:
		MPASS(args->lname != NULL);
		if ((args->mflags & M_ZERO) == 0)
			args->lopts |= RW_NEW;
		break;
	case HASH_LOCK_SX:
		MPASS(args->lname != NULL);
		if ((args->mflags & M_ZERO) == 0)
			args->lopts |= SX_NEW;
		break;
	case HASH_LOCK_RMLOCK:
		MPASS(args->lname != NULL);
		if ((args->mflags & M_ZERO) == 0)
			args->lopts |= RM_NEW;
		break;
	case HASH_LOCK_RMSLOCK:
		MPASS(args->lname != NULL);
		break;
	}

	for (i = 0; i < size; i++) {
		void *slot;

		/* The list head sits at the start of each slot. */
		slot = (char *)mem + i * hdrsize;
		switch (args->head) {
		case HASH_HEAD_LIST:
			LIST_INIT((LIST_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_CK_LIST:
			CK_LIST_INIT((CK_LIST_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_SLIST:
			SLIST_INIT((SLIST_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_CK_SLIST:
			CK_SLIST_INIT((CK_SLIST_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_STAILQ:
			STAILQ_INIT((STAILQ_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_CK_STAILQ:
			CK_STAILQ_INIT((CK_STAILQ_HEAD(, foo) *)slot);
			break;
		case HASH_HEAD_TAILQ:
			TAILQ_INIT((TAILQ_HEAD(, foo) *)slot);
			break;
		}

		/* The lock, if any, immediately follows the list head. */
		slot = (char *)slot + loffset;
		switch (args->lock) {
		case HASH_LOCK_NONE:
			break;
		case HASH_LOCK_MTX:
			mtx_init((struct mtx *)slot, args->lname, NULL,
			    args->lopts);
			break;
		case HASH_LOCK_RWLOCK:
			rw_init_flags((struct rwlock *)slot, args->lname,
			    args->lopts);
			break;
		case HASH_LOCK_SX:
			sx_init_flags((struct sx *)slot, args->lname,
			    args->lopts);
			break;
		case HASH_LOCK_RMLOCK:
			rm_init_flags((struct rmlock *)slot, args->lname,
			    args->lopts);
			break;
		case HASH_LOCK_RMSLOCK:
			rms_init((struct rmslock *)slot, args->lname);
			break;
		}

		if (args->ctor != NULL) {
			slot = (char *)mem + i * hdrsize;
			if ((args->error = args->ctor(slot)) != 0) {
				/*
				 * Destroy the lock of the slot whose
				 * constructor failed, then shrink
				 * args->size to 'i' so hashfree() tears
				 * down only the i fully constructed
				 * slots preceding it.
				 */
				slot = (char *)slot + loffset;
				switch (args->lock) {
				case HASH_LOCK_NONE:
					break;
				case HASH_LOCK_MTX:
					mtx_destroy((struct mtx *)slot);
					break;
				case HASH_LOCK_RWLOCK:
					rw_destroy((struct rwlock *)slot);
					break;
				case HASH_LOCK_SX:
					sx_destroy((struct sx *)slot);
					break;
				case HASH_LOCK_RMLOCK:
					rm_destroy((struct rmlock *)slot);
					break;
				case HASH_LOCK_RMSLOCK:
					rms_destroy((struct rmslock *)slot);
					break;
				}
				args->size = i;
				hashfree(mem, args);
				return (NULL);
			}
		}
	}

	args->size = size;
	return (mem);
}
293 
/*
 * Free a hash table allocated by hashalloc().  'args' must describe the
 * same layout that was used at allocation time, with args->size holding
 * the actual bucket count (hashalloc() wrote it back on success).  If
 * args->dtor is set it is invoked on every slot before teardown.  Under
 * INVARIANTS, panics if any bucket list is still non-empty.  A NULL
 * 'mem' is a no-op.
 */
void
hashfree(void *mem, struct hashalloc_args *args)
{
	size_t hdrsize, loffset;

	if (__predict_false(mem == NULL))
		return;

	hashalloc_sizes(args, &hdrsize, &loffset);

	for (u_int i = 0; i < args->size; i++) {
#ifdef INVARIANTS
		/* Referenced only via HPASS; KASSERT compiles out otherwise. */
		static const char msg[] =
		    "%s: hashtbl %p not empty (malloc type %s)";
#endif
#define	HPASS(exp) KASSERT(exp, (msg, __func__, mem, args->mtype->ks_shortdesc))
		void *slot;

		slot = (char *)mem + i * hdrsize;
		if (args->dtor != NULL)
			args->dtor(slot);
		/* Assert the bucket list is empty before freeing it. */
		switch (args->head) {
		case HASH_HEAD_LIST:
			HPASS(LIST_EMPTY((LIST_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_CK_LIST:
			HPASS(CK_LIST_EMPTY((CK_LIST_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_SLIST:
			HPASS(SLIST_EMPTY((SLIST_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_CK_SLIST:
			HPASS(CK_SLIST_EMPTY((CK_SLIST_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_STAILQ:
			HPASS(STAILQ_EMPTY((STAILQ_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_CK_STAILQ:
			HPASS(CK_STAILQ_EMPTY((CK_STAILQ_HEAD(, foo) *)slot));
			break;
		case HASH_HEAD_TAILQ:
			HPASS(TAILQ_EMPTY((TAILQ_HEAD(, foo) *)slot));
			break;
		}
#undef HPASS

		/* Destroy the per-bucket lock following the list head. */
		slot = (char *)slot + loffset;
		switch (args->lock) {
		case HASH_LOCK_NONE:
			break;
		case HASH_LOCK_MTX:
			mtx_destroy((struct mtx *)slot);
			break;
		case HASH_LOCK_RWLOCK:
			rw_destroy((struct rwlock *)slot);
			break;
		case HASH_LOCK_SX:
			sx_destroy((struct sx *)slot);
			break;
		case HASH_LOCK_RMLOCK:
			rm_destroy((struct rmlock *)slot);
			break;
		case HASH_LOCK_RMSLOCK:
			rms_destroy((struct rmslock *)slot);
			break;
		}
	}

	free(mem, args->mtype);
}
364 
365 static __inline int
hash_mflags(int flags)366 hash_mflags(int flags)
367 {
368 
369 	return ((flags & HASH_NOWAIT) ? M_NOWAIT : M_WAITOK);
370 }
371 
372 /*
373  * General routine to allocate a hash table with control of memory flags.
374  */
375 void *
hashinit_flags(int elements,struct malloc_type * type,u_long * hashmask,int flags)376 hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask,
377     int flags)
378 {
379 	struct hashalloc_args args = {
380 		.size = elements,
381 		.mtype = type,
382 		.mflags = hash_mflags(flags),
383 	};
384 	void *rv;
385 
386 	rv = hashalloc(&args);
387 	if (rv != NULL)
388 		*hashmask = args.size - 1;
389 	return (rv);
390 }
391 
392 /*
393  * Allocate and initialize a hash table with default flag: may sleep.
394  */
395 void *
hashinit(int elements,struct malloc_type * type,u_long * hashmask)396 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
397 {
398 
399 	return (hashinit_flags(elements, type, hashmask, HASH_WAITOK));
400 }
401 
402 void
hashdestroy(void * vhashtbl,struct malloc_type * type,u_long hashmask)403 hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
404 {
405 	struct hashalloc_args args = {
406 		.size = hashmask + 1,
407 		.mtype = type,
408 	};
409 
410 	hashfree(vhashtbl, &args);
411 }
412 
413 /*
414  * General routine to allocate a prime number sized hash table with control of
415  * memory flags.
416  */
417 void *
phashinit_flags(int elements,struct malloc_type * type,u_long * nentries,int flags)418 phashinit_flags(int elements, struct malloc_type *type, u_long *nentries, int flags)
419 {
420 	struct hashalloc_args args = {
421 		.size = elements,
422 		.mtype = type,
423 		.type = HASH_TYPE_PRIME,
424 		.mflags = hash_mflags(flags),
425 	};
426 	void *rv;
427 
428 	rv = hashalloc(&args);
429 	if (rv != NULL)
430 		*nentries = args.size;
431 	return (rv);
432 }
433 
434 /*
435  * Allocate and initialize a prime number sized hash table with default flag:
436  * may sleep.
437  */
438 void *
phashinit(int elements,struct malloc_type * type,u_long * nentries)439 phashinit(int elements, struct malloc_type *type, u_long *nentries)
440 {
441 
442 	return (phashinit_flags(elements, type, nentries, HASH_WAITOK));
443 }
444