/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_BREADCRUMBS_TYPES__
#define __INTEL_BREADCRUMBS_TYPES__

#include <linux/irq_work.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * Rather than have every client wait upon all user interrupts,
 * with the herd waking after every interrupt and each doing the
 * heavyweight seqno dance, we delegate the task (of being the
 * bottom-half of the user interrupt) to the first client. After
 * every interrupt, we wake up one client, who does the heavyweight
 * coherent seqno read and either goes back to sleep (if incomplete),
 * or wakes up all the completed clients in parallel, before then
 * transferring the bottom-half status to the next client in the queue.
 *
 * Compared to walking the entire list of waiters in a single dedicated
 * bottom-half, we reduce the latency of the first waiter by avoiding
 * a context switch, but incur additional coherent seqno reads when
 * following the chain of request breadcrumbs. Since it is most likely
 * that we have a single client waiting on each seqno, reducing the
 * overhead of waking that client is much preferred.
 *
 * Illustrative sketches of this flow follow the structure definition
 * below.
 */
struct intel_breadcrumbs {
	/* Not all breadcrumbs are attached to physical HW */
	struct intel_engine_cs *irq_engine;

	spinlock_t signalers_lock; /* protects the list of signalers */
	struct list_head signalers;
	struct llist_head signaled_requests;

	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
	struct irq_work irq_work; /* for use from inside irq_lock */
	unsigned int irq_enabled; /* nesting count for enabling the user interrupt */
	bool irq_armed; /* interrupt armed while clients are waiting */
};
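
/*
 * Illustrative sketch only, not the driver's implementation: one way the
 * delegation described at the top of this file could look, using a
 * hypothetical example_waiter type. A single irq_work callback acts as
 * the bottom-half, performs one coherent seqno read and wakes only those
 * clients whose target seqno has been reached. example_waiter,
 * example_read_hwsp() and example_signal_irq_work() are invented names
 * for illustration; the driver's real signalling code lives in
 * intel_breadcrumbs.c.
 */
#include <linux/completion.h>
#include <linux/kernel.h>

struct example_waiter {
	struct list_head link;		/* lives on breadcrumbs->signalers */
	u32 target_seqno;		/* seqno this client is waiting for */
	struct completion done;		/* completed once target_seqno passes */
};

/* Stand-in for the heavyweight coherent read of the HW status page. */
static u32 example_read_hwsp(struct intel_breadcrumbs *b)
{
	return 0; /* hypothetical; the real read would go via b->irq_engine */
}

static void example_signal_irq_work(struct irq_work *work)
{
	struct intel_breadcrumbs *b =
		container_of(work, struct intel_breadcrumbs, irq_work);
	struct example_waiter *w, *next;
	u32 seqno = example_read_hwsp(b); /* read once on behalf of everyone */

	spin_lock(&b->signalers_lock);
	list_for_each_entry_safe(w, next, &b->signalers, link) {
		/* seqno comparison that tolerates wraparound */
		if ((s32)(seqno - w->target_seqno) >= 0) {
			list_del(&w->link);
			complete(&w->done); /* wake just this finished client */
		}
	}
	spin_unlock(&b->signalers_lock);
}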
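
/*
 * A second hedged sketch: how the fields above might be initialised,
 * reusing the example_signal_irq_work() handler from the sketch above.
 * example_breadcrumbs_create() is an invented name; the driver's real
 * constructor lives in intel_breadcrumbs.c.
 */
#include <linux/llist.h>
#include <linux/slab.h>

static void example_signal_irq_work(struct irq_work *work);

static struct intel_breadcrumbs *
example_breadcrumbs_create(struct intel_engine_cs *irq_engine)
{
	struct intel_breadcrumbs *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	/* may be NULL for breadcrumbs not backed by physical HW */
	b->irq_engine = irq_engine;

	spin_lock_init(&b->signalers_lock);
	INIT_LIST_HEAD(&b->signalers);
	init_llist_head(&b->signaled_requests);

	spin_lock_init(&b->irq_lock);
	init_irq_work(&b->irq_work, example_signal_irq_work);

	return b;
}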

#endif /* __INTEL_BREADCRUMBS_TYPES__ */