/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>

#include "t4_mp_ring.h"

/*
 * i386 does not provide acq/rel variants of the 64-bit cmpset; the plain
 * version (a locked cmpxchg8b) already has full-barrier semantics, so it can
 * stand in for both.
 */
#if defined(__i386__)
#define	atomic_cmpset_acq_64 atomic_cmpset_64
#define	atomic_cmpset_rel_64 atomic_cmpset_64
#endif

/*
 * mp_ring handles multiple threads (producers) enqueueing data to a tx queue.
 * The thread that is writing the hardware descriptors is the consumer and it
 * runs with the consumer lock held. A producer becomes the consumer if there
 * isn't one already. The consumer runs with the flags set to BUSY and
 * consumes everything (IDLE or COALESCING) or gets STALLED. If it is running
 * over its budget it sets flags to TOO_BUSY. A producer that observes a
 * TOO_BUSY consumer will become the new consumer by setting flags to
 * TAKING_OVER. The original consumer stops and sets the flags back to BUSY for
 * the new consumer.
 *
 * COALESCING is the same as IDLE except there are items being held in the hope
 * that they can be coalesced with items that follow. The driver must arrange
 * for a tx update or some other event that transmits all the held items in a
 * timely manner if nothing else is enqueued.
 */
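
/*
 * Typical usage, as an illustrative sketch only ('my_drain', 'my_can_drain',
 * 'sc', and 'txq_lock' are hypothetical driver-side names; the callback
 * typedefs are declared in t4_mp_ring.h):
 *
 *	mp_ring_alloc(&txr, 1024, sc, my_drain, my_can_drain, M_DEVBUF,
 *	    &txq_lock, M_WAITOK);
 *	mp_ring_enqueue(txr, items, n, 64);	(any thread; 64 is the same
 *						 budget the ring itself uses
 *						 when it kicks a stalled ring)
 *	mp_ring_check_drainage(txr, 64);	(periodically, to restart a
 *						 STALLED or COALESCING ring)
 *	mp_ring_free(txr);
 */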
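
/*
 * The entire ring state -- producer head/tail, consumer index, and flags --
 * packs into one 64-bit word so that it can be read and updated with a single
 * atomic operation. pidx_head marks the end of the reserved region and
 * pidx_tail the end of the published region; slots between pidx_tail and
 * pidx_head have been reserved by producers but not filled in yet.
 */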
union ring_state {
	struct {
		uint16_t pidx_head;
		uint16_t pidx_tail;
		uint16_t cidx;
		uint16_t flags;
	};
	uint64_t state;
};

enum {
	IDLE = 0,	/* tx is all caught up, nothing to do. */
	COALESCING,	/* IDLE, but tx frames are being held for coalescing */
	BUSY,		/* consumer is running already, or will be shortly. */
	TOO_BUSY,	/* consumer is running and is beyond its budget */
	TAKING_OVER,	/* new consumer taking over from a TOO_BUSY consumer */
	STALLED,	/* consumer stopped due to lack of resources. */
};
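
/*
 * Paths by which a producer becomes the consumer; these index the
 * r->consumer[] counters. C_FAST: the ring was idle and empty, so the
 * producer publishes and drains its own items immediately. C_2: the ring
 * was idle or coalescing but other producers' reservations were still in
 * flight. C_3: the producer didn't claim consumership up front but found
 * the ring idle, coalescing, or drainable-after-stall once its items were
 * published. C_TAKEOVER: the producer took over from a TOO_BUSY consumer.
 */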
enum {
	C_FAST = 0,
	C_2,
	C_3,
	C_TAKEOVER,
};
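
/*
 * Number of slots that can still be reserved. One slot is always left empty
 * to distinguish a full ring from an empty one. Worked example (for
 * illustration): size = 8, cidx = 2, pidx_head = 6 means slots 2..5 are in
 * use, so (8 - 1) - 6 + 2 = 3 slots are available.
 */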
static inline uint16_t
space_available(struct mp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	else if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	else
		return (x - s.pidx_head + s.cidx);
}
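
/*
 * Advance idx by n, wrapping around the end of the ring. Worked example (for
 * illustration): size = 8, idx = 6, n = 3 gives x = 2, so the result is
 * n - x = 1, i.e. (6 + 3) % 8.
 */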
static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}

/*
 * Consumer. Called with the consumer lock held and a guarantee that there is
 * work to do.
 */
static void
drain_ring(struct mp_ring *r, int budget)
{
	union ring_state os, ns;
	int n, pending, total;
	uint16_t cidx;
	uint16_t pidx;
	bool coalescing;

	mtx_assert(r->cons_lock, MA_OWNED);

	os.state = atomic_load_acq_64(&r->state);
	MPASS(os.flags == BUSY);

	cidx = os.cidx;
	pidx = os.pidx_tail;
	MPASS(cidx != pidx);

	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx, &coalescing);
		if (n == 0) {
			critical_enter();
			os.state = atomic_load_64(&r->state);
			do {
				ns.state = os.state;
				ns.cidx = cidx;

				MPASS(os.flags == BUSY ||
				    os.flags == TOO_BUSY ||
				    os.flags == TAKING_OVER);

				if (os.flags == TAKING_OVER)
					ns.flags = BUSY;
				else
					ns.flags = STALLED;
			} while (atomic_fcmpset_64(&r->state, &os.state,
			    ns.state) == 0);
			critical_exit();
			if (os.flags == TAKING_OVER)
				counter_u64_add(r->abdications, 1);
			else if (ns.flags == STALLED)
				counter_u64_add(r->stalls, 1);
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;
		counter_u64_add(r->consumed, n);

		os.state = atomic_load_64(&r->state);
		do {
			MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
			    os.flags == TAKING_OVER);

			ns.state = os.state;
			ns.cidx = cidx;
			if (__predict_false(os.flags == TAKING_OVER)) {
				MPASS(total >= budget);
				ns.flags = BUSY;
				continue;
			}
			if (cidx == os.pidx_tail) {
				ns.flags = coalescing ? COALESCING : IDLE;
				continue;
			}
			if (total >= budget) {
				ns.flags = TOO_BUSY;
				continue;
			}
			MPASS(os.flags == BUSY);
			if (pending < 32)
				break;	/* don't publish cidx yet; batch updates */
		} while (atomic_fcmpset_acq_64(&r->state, &os.state,
		    ns.state) == 0);

		if (__predict_false(os.flags == TAKING_OVER)) {
			MPASS(ns.flags == BUSY);
			counter_u64_add(r->abdications, 1);
			break;
		}

		if (ns.flags == IDLE || ns.flags == COALESCING) {
			MPASS(ns.pidx_tail == cidx);
			if (ns.pidx_head != ns.pidx_tail)
				counter_u64_add(r->cons_idle2, 1);
			else
				counter_u64_add(r->cons_idle, 1);
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}

#ifdef INVARIANTS
	if (os.flags == TAKING_OVER)
		MPASS(ns.flags == BUSY);
	else {
		MPASS(ns.flags == IDLE || ns.flags == COALESCING ||
		    ns.flags == STALLED);
	}
#endif
}

/*
 * Flush any items being held back for coalescing. Called with cidx equal to
 * pidx_tail, so the drain callback sees an empty range and is expected to
 * transmit whatever it has been holding on to.
 */
static void
drain_txpkts(struct mp_ring *r, union ring_state os, int budget)
{
	union ring_state ns;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;
	bool coalescing;

	mtx_assert(r->cons_lock, MA_OWNED);
	MPASS(os.flags == BUSY);
	MPASS(cidx == pidx);

	r->drain(r, cidx, pidx, &coalescing);
	MPASS(coalescing == false);
	critical_enter();
	os.state = atomic_load_64(&r->state);
	do {
		ns.state = os.state;
		MPASS(os.flags == BUSY);
		MPASS(os.cidx == cidx);
		if (ns.cidx == ns.pidx_tail)
			ns.flags = IDLE;
		else
			ns.flags = BUSY;
	} while (atomic_fcmpset_acq_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();

	if (ns.flags == BUSY)
		drain_ring(r, budget);
}

int
mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
    ring_can_drain_t can_drain, struct malloc_type *mt, struct mtx *lck,
    int flags)
{
	struct mp_ring *r;
	int i;

	/* All idx are 16b so size can be 65536 at most */
	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
	    can_drain == NULL)
		return (EINVAL);
	*pr = NULL;
	flags &= M_NOWAIT | M_WAITOK;
	MPASS(flags != 0);

	r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
	if (r == NULL)
		return (ENOMEM);
	r->size = size;
	r->cookie = cookie;
	r->mt = mt;
	r->drain = drain;
	r->can_drain = can_drain;
	r->cons_lock = lck;
	if ((r->dropped = counter_u64_alloc(flags)) == NULL)
		goto failed;
	for (i = 0; i < nitems(r->consumer); i++) {
		if ((r->consumer[i] = counter_u64_alloc(flags)) == NULL)
			goto failed;
	}
	if ((r->not_consumer = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->abdications = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->stalls = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->consumed = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->cons_idle = counter_u64_alloc(flags)) == NULL)
		goto failed;
	if ((r->cons_idle2 = counter_u64_alloc(flags)) == NULL)
		goto failed;
	*pr = r;
	return (0);
failed:
	mp_ring_free(r);
	return (ENOMEM);
}

void
mp_ring_free(struct mp_ring *r)
{
	int i;

	if (r == NULL)
		return;

	if (r->dropped != NULL)
		counter_u64_free(r->dropped);
	for (i = 0; i < nitems(r->consumer); i++) {
		if (r->consumer[i] != NULL)
			counter_u64_free(r->consumer[i]);
	}
	if (r->not_consumer != NULL)
		counter_u64_free(r->not_consumer);
	if (r->abdications != NULL)
		counter_u64_free(r->abdications);
	if (r->stalls != NULL)
		counter_u64_free(r->stalls);
	if (r->consumed != NULL)
		counter_u64_free(r->consumed);
	if (r->cons_idle != NULL)
		counter_u64_free(r->cons_idle);
	if (r->cons_idle2 != NULL)
		counter_u64_free(r->cons_idle2);

	free(r, r->mt);
}

/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i, nospc, cons;
	bool consumer;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items. Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	nospc = 0;
	os.state = atomic_load_64(&r->state);
	for (;;) {
		for (;;) {
			if (__predict_true(space_available(r, os) >= n))
				break;

			/* Not enough room in the ring. */

			MPASS(os.flags != IDLE);
			MPASS(os.flags != COALESCING);
			if (__predict_false(++nospc > 100)) {
				counter_u64_add(r->dropped, n);
				return (ENOBUFS);
			}
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 64);
			else
				cpu_spinwait();
			os.state = atomic_load_64(&r->state);
		}

		/* There is room in the ring. */

		cons = -1;
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		if (os.flags == IDLE || os.flags == COALESCING) {
			MPASS(os.pidx_tail == os.cidx);
			if (os.pidx_head == os.pidx_tail) {
				cons = C_FAST;
				ns.pidx_tail = increment_idx(r, os.pidx_tail, n);
			} else
				cons = C_2;
			ns.flags = BUSY;
		} else if (os.flags == TOO_BUSY) {
			cons = C_TAKEOVER;
			ns.flags = TAKING_OVER;
		}
		critical_enter();
		if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}

	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	if (cons == C_FAST) {
		i = pidx_start;
		do {
			r->items[i] = *items++;
			if (__predict_false(++i == r->size))
				i = 0;
		} while (i != pidx_stop);
		critical_exit();
		counter_u64_add(r->consumer[C_FAST], 1);
		mtx_lock(r->cons_lock);
		drain_ring(r, budget);
		mtx_unlock(r->cons_lock);
		return (0);
	}

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time. It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = atomic_load_64(&r->state);
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail. The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	os.state = atomic_load_64(&r->state);
	do {
		consumer = false;
		ns.state = os.state;
		ns.pidx_tail = pidx_stop;
		if (os.flags == IDLE || os.flags == COALESCING ||
		    (os.flags == STALLED && r->can_drain(r))) {
			MPASS(cons == -1);
			consumer = true;
			ns.flags = BUSY;
		}
	} while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();

	if (cons == -1) {
		if (consumer)
			cons = C_3;
		else {
			counter_u64_add(r->not_consumer, 1);
			return (0);
		}
	}
	MPASS(cons > C_FAST && cons < nitems(r->consumer));
	counter_u64_add(r->consumer[cons], 1);
	mtx_lock(r->cons_lock);
	drain_ring(r, budget);
	mtx_unlock(r->cons_lock);

	return (0);
}

/*
 * Enqueue n items but never drain the ring. This should only be used from
 * within the drain callback, to enqueue new items while the ring is being
 * drained.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items. Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	os.state = atomic_load_64(&r->state);

	/* Should only be used from the drain callback. */
	MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
	    os.flags == TAKING_OVER);

	for (;;) {
		if (__predict_false(space_available(r, os) < n)) {
			/* Not enough room in the ring. */
			counter_u64_add(r->dropped, n);
			return (ENOBUFS);
		}

		/* There is room in the ring. */

		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		critical_enter();
		if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}

	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time. It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = atomic_load_64(&r->state);
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail. The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	os.state = atomic_load_64(&r->state);
	do {
		ns.state = os.state;
		ns.pidx_tail = pidx_stop;
	} while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
	critical_exit();

	counter_u64_add(r->not_consumer, 1);
	return (0);
}

void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
	union ring_state os, ns;

	os.state = atomic_load_64(&r->state);
	if (os.flags == STALLED && r->can_drain(r)) {
		MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
		ns.state = os.state;
		ns.flags = BUSY;
		if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) {
			mtx_lock(r->cons_lock);
			drain_ring(r, budget);
			mtx_unlock(r->cons_lock);
		}
	} else if (os.flags == COALESCING) {
		MPASS(os.cidx == os.pidx_tail);
		ns.state = os.state;
		ns.flags = BUSY;
		if (atomic_cmpset_acq_64(&r->state, os.state, ns.state)) {
			mtx_lock(r->cons_lock);
			drain_txpkts(r, ns, budget);
			mtx_unlock(r->cons_lock);
		}
	}
}

void
mp_ring_reset_stats(struct mp_ring *r)
{
	int i;

	counter_u64_zero(r->dropped);
	for (i = 0; i < nitems(r->consumer); i++)
		counter_u64_zero(r->consumer[i]);
	counter_u64_zero(r->not_consumer);
	counter_u64_zero(r->abdications);
	counter_u64_zero(r->stalls);
	counter_u64_zero(r->consumed);
	counter_u64_zero(r->cons_idle);
	counter_u64_zero(r->cons_idle2);
}

bool
mp_ring_is_idle(struct mp_ring *r)
{
	union ring_state s;

	s.state = atomic_load_64(&r->state);
	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
	    s.flags == IDLE)
		return (true);

	return (false);
}

void
mp_ring_sysctls(struct mp_ring *r, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sysctl_oid *oid;

	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mp_ring", CTLFLAG_RD |
	    CTLFLAG_MPSAFE, NULL, "mp_ring statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_U64(ctx, children, OID_AUTO, "state", CTLFLAG_RD,
	    __DEVOLATILE(uint64_t *, &r->state), 0, "ring state");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "dropped", CTLFLAG_RD,
	    &r->dropped, "# of items dropped");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumed",
	    CTLFLAG_RD, &r->consumed, "# of items consumed");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "fast_consumer",
	    CTLFLAG_RD, &r->consumer[C_FAST],
	    "# of times producer became consumer (fast)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumer2",
	    CTLFLAG_RD, &r->consumer[C_2],
	    "# of times producer became consumer (2)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "consumer3",
	    CTLFLAG_RD, &r->consumer[C_3],
	    "# of times producer became consumer (3)");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "takeovers",
	    CTLFLAG_RD, &r->consumer[C_TAKEOVER],
	    "# of times producer took over from another consumer.");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "not_consumer",
	    CTLFLAG_RD, &r->not_consumer,
	    "# of times producer did not become consumer");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "abdications",
	    CTLFLAG_RD, &r->abdications, "# of consumer abdications");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "stalls",
	    CTLFLAG_RD, &r->stalls, "# of consumer stalls");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cons_idle",
	    CTLFLAG_RD, &r->cons_idle,
	    "# of times consumer ran fully to completion");
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cons_idle2",
	    CTLFLAG_RD, &r->cons_idle2,
	    "# of times consumer idled when another enqueue was in progress");
}