Lines Matching +full:array +full:- +full:nest
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
10 * This is a limited-size FIFO maintaining pointers in FIFO order, with
13 * This implementation tries to minimize cache-contention when there is a
37 /* Read-only by both the producer and the consumer */
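The fragments above are from the file's introductory comment and the struct ptr_ring field annotations. As a minimal usage sketch (an illustration under assumptions, not code from this file: the caller runs in process context and example_roundtrip() is a hypothetical function), the public init/produce/consume/cleanup API fits together like this:

	#include <linux/ptr_ring.h>

	static int example_roundtrip(void)
	{
		struct ptr_ring ring;
		void *item = (void *)0x1;	/* any non-NULL pointer; NULL marks a free slot */
		int err;

		err = ptr_ring_init(&ring, 16, GFP_KERNEL);	/* 16 slots, may sleep */
		if (err)
			return err;

		err = ptr_ring_produce(&ring, item);		/* -ENOSPC if the ring is full */
		WARN_ON(ptr_ring_consume(&ring) != item);	/* NULL would mean empty */

		ptr_ring_cleanup(&ring, NULL);	/* nothing left queued, no destructor needed */
		return err;
	}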
51 return r->queue[r->producer]; in __ptr_ring_full()
58 spin_lock(&r->producer_lock); in ptr_ring_full()
60 spin_unlock(&r->producer_lock); in ptr_ring_full()
69 spin_lock_irq(&r->producer_lock); in ptr_ring_full_irq()
71 spin_unlock_irq(&r->producer_lock); in ptr_ring_full_irq()
81 spin_lock_irqsave(&r->producer_lock, flags); in ptr_ring_full_any()
83 spin_unlock_irqrestore(&r->producer_lock, flags); in ptr_ring_full_any()
92 spin_lock_bh(&r->producer_lock); in ptr_ring_full_bh()
94 spin_unlock_bh(&r->producer_lock); in ptr_ring_full_bh()
106 if (unlikely(!r->size) || r->queue[r->producer]) in __ptr_ring_produce()
107 return -ENOSPC; in __ptr_ring_produce()
113 WRITE_ONCE(r->queue[r->producer++], ptr); in __ptr_ring_produce()
114 if (unlikely(r->producer >= r->size)) in __ptr_ring_produce()
115 r->producer = 0; in __ptr_ring_produce()
128 spin_lock(&r->producer_lock); in ptr_ring_produce()
130 spin_unlock(&r->producer_lock); in ptr_ring_produce()
139 spin_lock_irq(&r->producer_lock); in ptr_ring_produce_irq()
141 spin_unlock_irq(&r->producer_lock); in ptr_ring_produce_irq()
151 spin_lock_irqsave(&r->producer_lock, flags); in ptr_ring_produce_any()
153 spin_unlock_irqrestore(&r->producer_lock, flags); in ptr_ring_produce_any()
162 spin_lock_bh(&r->producer_lock); in ptr_ring_produce_bh()
164 spin_unlock_bh(&r->producer_lock); in ptr_ring_produce_bh()
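The produce wrappers above differ only in how they take r->producer_lock, so the choice follows the calling context. A hedged sketch (the drop handling and the calling contexts are assumptions for illustration):

	#include <linux/ptr_ring.h>
	#include <linux/printk.h>

	/* Softirq/bottom-half context: the _bh variant keeps other BH users out. */
	static void produce_from_bh(struct ptr_ring *r, void *ptr)
	{
		if (ptr_ring_produce_bh(r, ptr))
			pr_warn_ratelimited("ring full, dropping entry\n");
	}

	/* Unknown context: the _any variant saves and restores IRQ flags. */
	static void produce_from_anywhere(struct ptr_ring *r, void *ptr)
	{
		if (ptr_ring_produce_any(r, ptr))
			pr_warn_ratelimited("ring full, dropping entry\n");
	}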
171 if (likely(r->size)) in __ptr_ring_peek()
172 return READ_ONCE(r->queue[r->consumer_head]); in __ptr_ring_peek()
184 * In this case - to avoid incorrectly detecting the ring
185 * as empty - the CPU consuming the ring entries is responsible
188 * re-test __ptr_ring_empty and/or consume the ring entries in __ptr_ring_empty()
196 if (likely(r->size)) in __ptr_ring_empty()
197 return !r->queue[READ_ONCE(r->consumer_head)]; in __ptr_ring_empty()
205 spin_lock(&r->consumer_lock); in ptr_ring_empty()
207 spin_unlock(&r->consumer_lock); in ptr_ring_empty()
216 spin_lock_irq(&r->consumer_lock); in ptr_ring_empty_irq()
218 spin_unlock_irq(&r->consumer_lock); in ptr_ring_empty_irq()
228 spin_lock_irqsave(&r->consumer_lock, flags); in ptr_ring_empty_any()
230 spin_unlock_irqrestore(&r->consumer_lock, flags); in ptr_ring_empty_any()
239 spin_lock_bh(&r->consumer_lock); in ptr_ring_empty_bh()
241 spin_unlock_bh(&r->consumer_lock); in ptr_ring_empty_bh()
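The comment above __ptr_ring_empty() places the burden on the consuming CPU: either drain the ring completely, or make sure emptiness gets re-tested once notifications are re-armed. One common shape for the consumer side is sketched below (process() and rearm_notification() are hypothetical stand-ins, and the ring is assumed never to be resized so the lockless test is safe):

	#include <linux/ptr_ring.h>

	extern void process(void *entry);		/* hypothetical handler */
	extern void rearm_notification(void);		/* hypothetical, e.g. re-enable an IRQ */

	static void drain_and_rearm(struct ptr_ring *r)
	{
		void *ptr;

		do {
			while ((ptr = ptr_ring_consume(r)))
				process(ptr);

			rearm_notification();

			/* Re-test: an entry produced after the last consume but
			 * before re-arming must not be left unprocessed. */
		} while (!__ptr_ring_empty(r));
	}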
252 * consumer = r->consumer; in __ptr_ring_discard_one()
253 * r->queue[consumer++] = NULL; in __ptr_ring_discard_one()
254 * if (unlikely(consumer >= r->size)) in __ptr_ring_discard_one()
256 * r->consumer = consumer; in __ptr_ring_discard_one()
264 int consumer_head = r->consumer_head; in __ptr_ring_discard_one()
269 * We also do this when we reach the end of the ring - not mandatory in __ptr_ring_discard_one()
272 if (unlikely(consumer_head - r->consumer_tail >= r->batch || in __ptr_ring_discard_one()
273 consumer_head >= r->size)) { in __ptr_ring_discard_one()
279 while (likely(head >= r->consumer_tail)) in __ptr_ring_discard_one()
280 r->queue[head--] = NULL; in __ptr_ring_discard_one()
281 r->consumer_tail = consumer_head; in __ptr_ring_discard_one()
283 if (unlikely(consumer_head >= r->size)) { in __ptr_ring_discard_one()
285 r->consumer_tail = 0; in __ptr_ring_discard_one()
288 WRITE_ONCE(r->consumer_head, consumer_head); in __ptr_ring_discard_one()
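Concretely (assuming r->batch == 8, r->size == 16, and both consumer indices starting at 0): the first seven consumes only advance consumer_head and leave the stale pointers in place; on the eighth, consumer_head - consumer_tail reaches the batch size, entries 7 down to 0 are NULLed in reverse order, and consumer_tail becomes 8. consumer_head itself is still published with WRITE_ONCE() on every call, so the lockless empty test stays accurate; what is batched is the NULLing of queue slots, which is the store the producer's full test actually reads, so the producer's cache lines are invalidated once per batch rather than once per consumed entry.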
307 void **array, int n) in __ptr_ring_consume_batched() argument
316 array[i] = ptr; in __ptr_ring_consume_batched()
331 spin_lock(&r->consumer_lock); in ptr_ring_consume()
333 spin_unlock(&r->consumer_lock); in ptr_ring_consume()
342 spin_lock_irq(&r->consumer_lock); in ptr_ring_consume_irq()
344 spin_unlock_irq(&r->consumer_lock); in ptr_ring_consume_irq()
354 spin_lock_irqsave(&r->consumer_lock, flags); in ptr_ring_consume_any()
356 spin_unlock_irqrestore(&r->consumer_lock, flags); in ptr_ring_consume_any()
365 spin_lock_bh(&r->consumer_lock); in ptr_ring_consume_bh()
367 spin_unlock_bh(&r->consumer_lock); in ptr_ring_consume_bh()
373 void **array, int n) in ptr_ring_consume_batched() argument
377 spin_lock(&r->consumer_lock); in ptr_ring_consume_batched()
378 ret = __ptr_ring_consume_batched(r, array, n); in ptr_ring_consume_batched()
379 spin_unlock(&r->consumer_lock); in ptr_ring_consume_batched()
385 void **array, int n) in ptr_ring_consume_batched_irq() argument
389 spin_lock_irq(&r->consumer_lock); in ptr_ring_consume_batched_irq()
390 ret = __ptr_ring_consume_batched(r, array, n); in ptr_ring_consume_batched_irq()
391 spin_unlock_irq(&r->consumer_lock); in ptr_ring_consume_batched_irq()
397 void **array, int n) in ptr_ring_consume_batched_any() argument
402 spin_lock_irqsave(&r->consumer_lock, flags); in ptr_ring_consume_batched_any()
403 ret = __ptr_ring_consume_batched(r, array, n); in ptr_ring_consume_batched_any()
404 spin_unlock_irqrestore(&r->consumer_lock, flags); in ptr_ring_consume_batched_any()
410 void **array, int n) in ptr_ring_consume_batched_bh() argument
414 spin_lock_bh(&r->consumer_lock); in ptr_ring_consume_batched_bh()
415 ret = __ptr_ring_consume_batched(r, array, n); in ptr_ring_consume_batched_bh()
416 spin_unlock_bh(&r->consumer_lock); in ptr_ring_consume_batched_bh()
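__ptr_ring_consume_batched() and the four locked wrappers above let a consumer pull several entries per lock acquisition. A sketch of draining in batches (process() is a hypothetical handler and the batch size of 64 is arbitrary):

	#include <linux/ptr_ring.h>

	extern void process(void *entry);	/* hypothetical handler */

	static void drain_in_batches(struct ptr_ring *r)
	{
		void *batch[64];
		int n, i;

		/* Each call takes r->consumer_lock once for up to 64 entries. */
		while ((n = ptr_ring_consume_batched(r, batch, ARRAY_SIZE(batch))) > 0)
			for (i = 0; i < n; i++)
				process(batch[i]);
	}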
430 spin_lock(&(r)->consumer_lock); \
432 spin_unlock(&(r)->consumer_lock); \
439 spin_lock_irq(&(r)->consumer_lock); \
441 spin_unlock_irq(&(r)->consumer_lock); \
448 spin_lock_bh(&(r)->consumer_lock); \
450 spin_unlock_bh(&(r)->consumer_lock); \
458 spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
460 spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
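The __PTR_RING_PEEK_CALL wrappers above apply a function to the head entry under the consumer lock without consuming it; the callee has to tolerate a NULL argument, since __ptr_ring_peek() returns NULL on an empty ring. A hedged sketch (entry_len() and its length rule are invented for illustration):

	#include <linux/ptr_ring.h>

	static int entry_len(void *ptr)
	{
		return ptr ? 256 : 0;	/* hypothetical per-entry length */
	}

	static int peek_head_len(struct ptr_ring *r)
	{
		/* Takes and releases r->consumer_lock around the call. */
		return PTR_RING_PEEK_CALL(r, entry_len);
	}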
476 r->size = size; in __ptr_ring_set_size()
477 r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); in __ptr_ring_set_size()
483 if (r->batch > r->size / 2 || !r->batch) in __ptr_ring_set_size()
484 r->batch = 1; in __ptr_ring_set_size()
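As a worked example (assuming SMP_CACHE_BYTES == 64 and 8-byte pointers): r->batch = 64 * 2 / 8 = 16 entries per consumer flush. For rings of 31 or fewer entries that batch would exceed half the ring, so the check above clamps it to 1, as it also does when the division yields 0.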
489 r->queue = __ptr_ring_init_queue_alloc(size, gfp); in ptr_ring_init()
490 if (!r->queue) in ptr_ring_init()
491 return -ENOMEM; in ptr_ring_init()
494 r->producer = r->consumer_head = r->consumer_tail = 0; in ptr_ring_init()
495 spin_lock_init(&r->producer_lock); in ptr_ring_init()
496 spin_lock_init(&r->consumer_lock); in ptr_ring_init()
507 * resize you must make sure all uses nest correctly.
517 spin_lock_irqsave(&r->consumer_lock, flags); in ptr_ring_unconsume()
518 spin_lock(&r->producer_lock); in ptr_ring_unconsume()
520 if (!r->size) in ptr_ring_unconsume()
527 head = r->consumer_head - 1; in ptr_ring_unconsume()
528 while (likely(head >= r->consumer_tail)) in ptr_ring_unconsume()
529 r->queue[head--] = NULL; in ptr_ring_unconsume()
530 r->consumer_tail = r->consumer_head; in ptr_ring_unconsume()
537 head = r->consumer_head - 1; in ptr_ring_unconsume()
539 head = r->size - 1; in ptr_ring_unconsume()
540 if (r->queue[head]) { in ptr_ring_unconsume()
544 r->queue[head] = batch[--n]; in ptr_ring_unconsume()
545 r->consumer_tail = head; in ptr_ring_unconsume()
547 WRITE_ONCE(r->consumer_head, head); in ptr_ring_unconsume()
553 destroy(batch[--n]); in ptr_ring_unconsume()
554 spin_unlock(&r->producer_lock); in ptr_ring_unconsume()
555 spin_unlock_irqrestore(&r->consumer_lock, flags); in ptr_ring_unconsume()
575 r->producer = producer; in __ptr_ring_swap_queue()
576 r->consumer_head = 0; in __ptr_ring_swap_queue()
577 r->consumer_tail = 0; in __ptr_ring_swap_queue()
578 old = r->queue; in __ptr_ring_swap_queue()
579 r->queue = queue; in __ptr_ring_swap_queue()
586 * resize you must make sure all uses nest correctly.
598 return -ENOMEM; in ptr_ring_resize()
600 spin_lock_irqsave(&(r)->consumer_lock, flags); in ptr_ring_resize()
601 spin_lock(&(r)->producer_lock); in ptr_ring_resize()
605 spin_unlock(&(r)->producer_lock); in ptr_ring_resize()
606 spin_unlock_irqrestore(&(r)->consumer_lock, flags); in ptr_ring_resize()
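ptr_ring_resize() allocates the new queue first, then takes the consumer lock (IRQ-saving) followed by the producer lock, swaps the queues, and hands any entries that no longer fit to the destroy callback. A sketch of growing a ring (free_entry() and the doubling policy are assumptions, not from this file):

	#include <linux/ptr_ring.h>
	#include <linux/slab.h>

	static void free_entry(void *ptr)
	{
		kfree(ptr);	/* assumes entries were kmalloc'ed by the producer */
	}

	static int grow_ring(struct ptr_ring *r, int cur_size)
	{
		/* With a larger queue, free_entry() should normally not be called. */
		return ptr_ring_resize(r, cur_size * 2, GFP_KERNEL, free_entry);
	}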
615 * resize you must make sure all uses nest correctly.
639 spin_lock_irqsave(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
640 spin_lock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
643 spin_unlock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple()
644 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple()
655 while (--i >= 0) in ptr_ring_resize_multiple()
661 return -ENOMEM; in ptr_ring_resize_multiple()
671 kvfree(r->queue); in ptr_ring_cleanup()