/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: serializes simultaneous updates of ->word and ->cleared
	 */
	raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/*
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/*
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow()
	 */
	unsigned int min_shallow_depth;

	/**
	 * @completion_cnt: Number of cleared bits passed to the
	 * wakeup function.
	 */
	atomic_t completion_cnt;

	/**
	 * @wakeup_cnt: Number of thread wake ups issued.
	 */
	atomic_t wakeup_cnt;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
159  *         given, a good default is chosen.
160  * @flags: Allocation flags.
161  * @node: Memory node to allocate on.
162  * @round_robin: If true, be stricter about allocation order; always allocate
163  *               starting from the last allocated bit. This is less efficient
164  *               than the default behavior (false).
165  * @alloc_hint: If true, apply percpu hint for where to start searching for
166  *              a free bit.
167  *
168  * Return: Zero on success or negative errno on failure.
169  */
170 int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
171 		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
172 
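/*
 * Example (an illustrative sketch, not a definitive usage pattern): a
 * minimal allocate/release cycle on a plain sbitmap. The depth of 128 and
 * the round_robin/alloc_hint choices are arbitrary; error handling beyond
 * the init call is elided.
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0)
 *		sbitmap_put(&sb, nr);
 *	sbitmap_free(&sb);
 */
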
/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}

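/*
 * Worked example (illustrative numbers): with depth = 100 and shift = 5
 * (32 bits per word), map_nr is 4. For index 0..2, __map_depth() returns
 * 32; for the final word (index 3) it returns 100 - (3 << 5) = 4, the
 * bits left over in the partial last word.
 */
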
/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

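/*
 * Worked example (illustrative): with shift = 6 (64 bits per word),
 * bitnr = 130 gives SB_NR_TO_INDEX = 130 >> 6 = 2 and
 * SB_NR_TO_BIT = 130 & 63 = 2, i.e. bit 2 of word 2.
 */
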
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

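/*
 * Example callback usage (illustrative; the names here are made up for
 * this sketch). The callback sees each bit that is set and not cleared,
 * and returns true to keep iterating:
 *
 *	static bool count_busy(struct sbitmap *sb, unsigned int bitnr,
 *			       void *data)
 *	{
 *		unsigned int *busy = data;
 *
 *		(*busy)++;
 *		return true;
 *	}
 *
 *	unsigned int busy = 0;
 *	sbitmap_for_each_set(sb, count_busy, &busy);
 */
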
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special: it doesn't actually clear the bit, but rather sets
 * the corresponding bit in the ->cleared mask instead. Paired with the
 * caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

/*
 * Pairs with sbitmap_get(); this applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int	shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}

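/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64): the loop
 * keeps at least four words in play. For depth = 128, shift starts at 6
 * and drops once (4 << 6 = 256 > 128), giving shift = 5, i.e. 32 bits
 * per word spread over four words. For depth >= 256, full 64-bit words
 * are kept.
 */
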
/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set and not cleared bits in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: Number of bits which are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

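/*
 * Example (an illustrative sketch, not a definitive pattern): allocating a
 * tag from a queue and releasing it again. The depth of 64 is arbitrary
 * and error handling beyond the init call is elided.
 *
 *	struct sbitmap_queue sbq;
 *	unsigned int cpu;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	tag = sbitmap_queue_get(&sbq, &cpu);
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&sbq, tag, cpu);
 *	sbitmap_queue_free(&sbq);
 */
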
/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate the wake batch
 * @sbq: Bitmap queue to recalculate the wake batch for.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this will calculate the wake batch
 * from the depth. This interface is for HCTX shared tags or queue shared tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);

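/*
 * Example of consuming the returned mask (illustrative sketch): each set
 * bit in the mask is one allocated tag, offset by @offset.
 *
 *	unsigned long mask;
 *	unsigned int offset, bit;
 *
 *	mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG) {
 *		unsigned int tag = offset + bit;
 *		...
 *	}
 */
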
/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from the queue.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

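/*
 * Example pairing (illustrative): a caller that will never pass a shallow
 * depth smaller than 2 announces that once up front, so that the wake
 * batch stays correct:
 *
 *	sbitmap_queue_min_shallow_depth(&sbq, 2);
 *	...
 *	nr = sbitmap_queue_get_shallow(&sbq, 2);
 */
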
/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
				struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait);

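/*
 * Example wait loop (an illustrative sketch of how these wrappers compose;
 * wait_index is a caller-owned atomic_t, and real callers such as the
 * block layer add more state handling around this):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */
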
/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue()
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */