xref: /kvmtool/riscv/plic.c (revision 762224e47cc2f8f5192d014caee3315ad4129fc9)
1*762224e4SAnup Patel 
2*762224e4SAnup Patel #include "kvm/devices.h"
3*762224e4SAnup Patel #include "kvm/ioeventfd.h"
4*762224e4SAnup Patel #include "kvm/ioport.h"
5*762224e4SAnup Patel #include "kvm/kvm.h"
6*762224e4SAnup Patel #include "kvm/kvm-cpu.h"
7*762224e4SAnup Patel #include "kvm/irq.h"
8*762224e4SAnup Patel #include "kvm/mutex.h"
9*762224e4SAnup Patel 
10*762224e4SAnup Patel #include <linux/byteorder.h>
11*762224e4SAnup Patel #include <linux/kernel.h>
12*762224e4SAnup Patel #include <linux/kvm.h>
13*762224e4SAnup Patel #include <linux/sizes.h>
14*762224e4SAnup Patel 
15*762224e4SAnup Patel /*
16*762224e4SAnup Patel  * From the RISC-V Privileged Spec v1.10:
17*762224e4SAnup Patel  *
18*762224e4SAnup Patel  * Global interrupt sources are assigned small unsigned integer identifiers,
19*762224e4SAnup Patel  * beginning at the value 1.  An interrupt ID of 0 is reserved to mean no
20*762224e4SAnup Patel  * interrupt.  Interrupt identifiers are also used to break ties when two or
21*762224e4SAnup Patel  * more interrupt sources have the same assigned priority. Smaller values of
22*762224e4SAnup Patel  * interrupt ID take precedence over larger values of interrupt ID.
23*762224e4SAnup Patel  *
24*762224e4SAnup Patel  * While the RISC-V supervisor spec doesn't define the maximum number of
25*762224e4SAnup Patel  * devices supported by the PLIC, the largest number supported by devices
26*762224e4SAnup Patel  * marked as 'riscv,plic0' (which is the only device type this driver supports,
27*762224e4SAnup Patel  * and is the only extant PLIC as of now) is 1024.  As mentioned above, device
28*762224e4SAnup Patel  * 0 is defined to be non-existent so this device really only supports 1023
29*762224e4SAnup Patel  * devices.
30*762224e4SAnup Patel  */
31*762224e4SAnup Patel 
32*762224e4SAnup Patel #define MAX_DEVICES	1024
33*762224e4SAnup Patel #define MAX_CONTEXTS	15872
34*762224e4SAnup Patel 
35*762224e4SAnup Patel /*
36*762224e4SAnup Patel  * The PLIC consists of memory-mapped control registers, with a memory map as
37*762224e4SAnup Patel  * follows:
38*762224e4SAnup Patel  *
39*762224e4SAnup Patel  * base + 0x000000: Reserved (interrupt source 0 does not exist)
40*762224e4SAnup Patel  * base + 0x000004: Interrupt source 1 priority
41*762224e4SAnup Patel  * base + 0x000008: Interrupt source 2 priority
42*762224e4SAnup Patel  * ...
43*762224e4SAnup Patel  * base + 0x000FFC: Interrupt source 1023 priority
44*762224e4SAnup Patel  * base + 0x001000: Pending 0
45*762224e4SAnup Patel  * base + 0x001FFF: Pending
46*762224e4SAnup Patel  * base + 0x002000: Enable bits for sources 0-31 on context 0
47*762224e4SAnup Patel  * base + 0x002004: Enable bits for sources 32-63 on context 0
48*762224e4SAnup Patel  * ...
49*762224e4SAnup Patel  * base + 0x00207C: Enable bits for sources 992-1023 on context 0
50*762224e4SAnup Patel  * base + 0x002080: Enable bits for sources 0-31 on context 1
51*762224e4SAnup Patel  * ...
52*762224e4SAnup Patel  * base + 0x002100: Enable bits for sources 0-31 on context 2
53*762224e4SAnup Patel  * ...
54*762224e4SAnup Patel  * base + 0x1F1F80: Enable bits for sources 0-31 on context 15871
54*762224e4SAnup Patel  * base + 0x1F1FFC: Enable bits for sources 992-1023 on context 15871
55*762224e4SAnup Patel  * base + 0x1F2000: Reserved
56*762224e4SAnup Patel  * ...		    (higher context IDs would fit here, but wouldn't fit
57*762224e4SAnup Patel  *		     inside the per-context priority vector)
58*762224e4SAnup Patel  * base + 0x1FFFFC: Reserved
59*762224e4SAnup Patel  * base + 0x200000: Priority threshold for context 0
60*762224e4SAnup Patel  * base + 0x200004: Claim/complete for context 0
61*762224e4SAnup Patel  * base + 0x200008: Reserved
62*762224e4SAnup Patel  * ...
63*762224e4SAnup Patel  * base + 0x200FFC: Reserved
64*762224e4SAnup Patel  * base + 0x201000: Priority threshold for context 1
65*762224e4SAnup Patel  * base + 0x201004: Claim/complete for context 1
66*762224e4SAnup Patel  * ...
67*762224e4SAnup Patel  * base + 0xFFE000: Priority threshold for context 15871
68*762224e4SAnup Patel  * base + 0xFFE004: Claim/complete for context 15871
69*762224e4SAnup Patel  * base + 0xFFE008: Reserved
70*762224e4SAnup Patel  * ...
71*762224e4SAnup Patel  * base + 0xFFFFFC: Reserved
72*762224e4SAnup Patel  */
73*762224e4SAnup Patel 
74*762224e4SAnup Patel /* Each interrupt source has a priority register associated with it. */
75*762224e4SAnup Patel #define PRIORITY_BASE		0
76*762224e4SAnup Patel #define PRIORITY_PER_ID		4
77*762224e4SAnup Patel 
78*762224e4SAnup Patel /*
79*762224e4SAnup Patel  * Each hart context has a vector of interupt enable bits associated with it.
80*762224e4SAnup Patel  * There's one bit for each interrupt source.
81*762224e4SAnup Patel  */
82*762224e4SAnup Patel #define ENABLE_BASE		0x2000
83*762224e4SAnup Patel #define ENABLE_PER_HART		0x80
84*762224e4SAnup Patel 
85*762224e4SAnup Patel /*
86*762224e4SAnup Patel  * Each hart context has a set of control registers associated with it.  Right
87*762224e4SAnup Patel  * now there's only two: a source priority threshold over which the hart will
88*762224e4SAnup Patel  * take an interrupt, and a register to claim interrupts.
89*762224e4SAnup Patel  */
90*762224e4SAnup Patel #define CONTEXT_BASE		0x200000
91*762224e4SAnup Patel #define CONTEXT_PER_HART	0x1000
92*762224e4SAnup Patel #define CONTEXT_THRESHOLD	0
93*762224e4SAnup Patel #define CONTEXT_CLAIM		4
94*762224e4SAnup Patel 
95*762224e4SAnup Patel #define REG_SIZE		0x1000000
96*762224e4SAnup Patel 
struct plic_state;

/*
 * Per-hart-context PLIC state. Each context owns its own view of which
 * sources are enabled/pending/claimed, protected by irq_lock.
 */
struct plic_context {
	/* State to which this belongs */
	struct plic_state *s;

	/* Static Configuration */
	u32 num;		/* index of this context in plic_state->contexts */
	struct kvm_cpu *vcpu;	/* VCPU whose interrupt line this context drives */

	/* Local IRQ state */
	struct mutex irq_lock;		/* protects all fields below */
	u8 irq_priority_threshold;	/* CONTEXT_THRESHOLD register value */
	u32 irq_enable[MAX_DEVICES/32];		/* per-source enable bits */
	u32 irq_pending[MAX_DEVICES/32];	/* per-source pending bits */
	u8 irq_pending_priority[MAX_DEVICES];	/* priority latched when source went pending */
	u32 irq_claimed[MAX_DEVICES/32];	/* claimed but not yet completed */
	u32 irq_autoclear[MAX_DEVICES/32];	/* edge sources auto-retired on CLAIM read */
};
116*762224e4SAnup Patel 
/*
 * Global PLIC device state: static configuration, the context array,
 * and the global source priority/level registers (guarded by irq_lock).
 */
struct plic_state {
	bool ready;		/* set once init completes; gates plic__irq_trig() */
	struct kvm *kvm;
	struct device_header dev_hdr;

	/* Static Configuration */
	u32 num_irq;		/* number of interrupt IDs (including unusable ID 0) */
	u32 num_irq_word;	/* number of 32-bit words covering num_irq bits */
	u32 max_prio;		/* highest programmable source priority */

	/* Context Array */
	u32 num_context;	/* two contexts (M- and S-mode) per VCPU */
	struct plic_context *contexts;

	/* Global IRQ state */
	struct mutex irq_lock;		/* protects irq_priority[] and irq_level[] */
	u8 irq_priority[MAX_DEVICES];	/* per-source priority registers */
	u32 irq_level[MAX_DEVICES/32];	/* current raw input line levels */
};
136*762224e4SAnup Patel 
/* Singleton PLIC instance for the whole VM. */
static struct plic_state plic;
138*762224e4SAnup Patel 
139*762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
140*762224e4SAnup Patel static u32 __plic_context_best_pending_irq(struct plic_state *s,
141*762224e4SAnup Patel 					   struct plic_context *c)
142*762224e4SAnup Patel {
143*762224e4SAnup Patel 	u8 best_irq_prio = 0;
144*762224e4SAnup Patel 	u32 i, j, irq, best_irq = 0;
145*762224e4SAnup Patel 
146*762224e4SAnup Patel 	for (i = 0; i < s->num_irq_word; i++) {
147*762224e4SAnup Patel 		if (!c->irq_pending[i])
148*762224e4SAnup Patel 			continue;
149*762224e4SAnup Patel 
150*762224e4SAnup Patel 		for (j = 0; j < 32; j++) {
151*762224e4SAnup Patel 			irq = i * 32 + j;
152*762224e4SAnup Patel 			if ((s->num_irq <= irq) ||
153*762224e4SAnup Patel 			    !(c->irq_pending[i] & (1 << j)) ||
154*762224e4SAnup Patel 			    (c->irq_claimed[i] & (1 << j)))
155*762224e4SAnup Patel 				continue;
156*762224e4SAnup Patel 
157*762224e4SAnup Patel 			if (!best_irq ||
158*762224e4SAnup Patel 			    (best_irq_prio < c->irq_pending_priority[irq])) {
159*762224e4SAnup Patel 				best_irq = irq;
160*762224e4SAnup Patel 				best_irq_prio = c->irq_pending_priority[irq];
161*762224e4SAnup Patel 			}
162*762224e4SAnup Patel 		}
163*762224e4SAnup Patel 	}
164*762224e4SAnup Patel 
165*762224e4SAnup Patel 	return best_irq;
166*762224e4SAnup Patel }
167*762224e4SAnup Patel 
168*762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
169*762224e4SAnup Patel static void __plic_context_irq_update(struct plic_state *s,
170*762224e4SAnup Patel 				      struct plic_context *c)
171*762224e4SAnup Patel {
172*762224e4SAnup Patel 	u32 best_irq = __plic_context_best_pending_irq(s, c);
173*762224e4SAnup Patel 	u32 virq = (best_irq) ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;
174*762224e4SAnup Patel 
175*762224e4SAnup Patel 	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
176*762224e4SAnup Patel 		pr_warning("KVM_INTERRUPT failed");
177*762224e4SAnup Patel }
178*762224e4SAnup Patel 
179*762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
180*762224e4SAnup Patel static u32 __plic_context_irq_claim(struct plic_state *s,
181*762224e4SAnup Patel 				    struct plic_context *c)
182*762224e4SAnup Patel {
183*762224e4SAnup Patel 	u32 virq = KVM_INTERRUPT_UNSET;
184*762224e4SAnup Patel 	u32 best_irq = __plic_context_best_pending_irq(s, c);
185*762224e4SAnup Patel 	u32 best_irq_word = best_irq / 32;
186*762224e4SAnup Patel 	u32 best_irq_mask = (1 << (best_irq % 32));
187*762224e4SAnup Patel 
188*762224e4SAnup Patel 	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
189*762224e4SAnup Patel 		pr_warning("KVM_INTERRUPT failed");
190*762224e4SAnup Patel 
191*762224e4SAnup Patel 	if (best_irq) {
192*762224e4SAnup Patel 		if (c->irq_autoclear[best_irq_word] & best_irq_mask) {
193*762224e4SAnup Patel 			c->irq_pending[best_irq_word] &= ~best_irq_mask;
194*762224e4SAnup Patel 			c->irq_pending_priority[best_irq] = 0;
195*762224e4SAnup Patel 			c->irq_claimed[best_irq_word] &= ~best_irq_mask;
196*762224e4SAnup Patel 			c->irq_autoclear[best_irq_word] &= ~best_irq_mask;
197*762224e4SAnup Patel 		} else
198*762224e4SAnup Patel 			c->irq_claimed[best_irq_word] |= best_irq_mask;
199*762224e4SAnup Patel 	}
200*762224e4SAnup Patel 
201*762224e4SAnup Patel 	__plic_context_irq_update(s, c);
202*762224e4SAnup Patel 
203*762224e4SAnup Patel 	return best_irq;
204*762224e4SAnup Patel }
205*762224e4SAnup Patel 
206*762224e4SAnup Patel void plic__irq_trig(struct kvm *kvm, int irq, int level, bool edge)
207*762224e4SAnup Patel {
208*762224e4SAnup Patel 	bool irq_marked = false;
209*762224e4SAnup Patel 	u8 i, irq_prio, irq_word;
210*762224e4SAnup Patel 	u32 irq_mask;
211*762224e4SAnup Patel 	struct plic_context *c = NULL;
212*762224e4SAnup Patel 	struct plic_state *s = &plic;
213*762224e4SAnup Patel 
214*762224e4SAnup Patel 	if (!s->ready)
215*762224e4SAnup Patel 		return;
216*762224e4SAnup Patel 
217*762224e4SAnup Patel 	if (irq <= 0 || s->num_irq <= (u32)irq)
218*762224e4SAnup Patel 		goto done;
219*762224e4SAnup Patel 
220*762224e4SAnup Patel 	mutex_lock(&s->irq_lock);
221*762224e4SAnup Patel 
222*762224e4SAnup Patel 	irq_prio = s->irq_priority[irq];
223*762224e4SAnup Patel 	irq_word = irq / 32;
224*762224e4SAnup Patel 	irq_mask = 1 << (irq % 32);
225*762224e4SAnup Patel 
226*762224e4SAnup Patel 	if (level)
227*762224e4SAnup Patel 		s->irq_level[irq_word] |= irq_mask;
228*762224e4SAnup Patel 	else
229*762224e4SAnup Patel 		s->irq_level[irq_word] &= ~irq_mask;
230*762224e4SAnup Patel 
231*762224e4SAnup Patel 	/*
232*762224e4SAnup Patel 	 * Note: PLIC interrupts are level-triggered. As of now,
233*762224e4SAnup Patel 	 * there is no notion of edge-triggered interrupts. To
234*762224e4SAnup Patel 	 * handle this we auto-clear edge-triggered interrupts
235*762224e4SAnup Patel 	 * when PLIC context CLAIM register is read.
236*762224e4SAnup Patel 	 */
237*762224e4SAnup Patel 	for (i = 0; i < s->num_context; i++) {
238*762224e4SAnup Patel 		c = &s->contexts[i];
239*762224e4SAnup Patel 
240*762224e4SAnup Patel 		mutex_lock(&c->irq_lock);
241*762224e4SAnup Patel 		if (c->irq_enable[irq_word] & irq_mask) {
242*762224e4SAnup Patel 			if (level) {
243*762224e4SAnup Patel 				c->irq_pending[irq_word] |= irq_mask;
244*762224e4SAnup Patel 				c->irq_pending_priority[irq] = irq_prio;
245*762224e4SAnup Patel 				if (edge)
246*762224e4SAnup Patel 					c->irq_autoclear[irq_word] |= irq_mask;
247*762224e4SAnup Patel 			} else {
248*762224e4SAnup Patel 				c->irq_pending[irq_word] &= ~irq_mask;
249*762224e4SAnup Patel 				c->irq_pending_priority[irq] = 0;
250*762224e4SAnup Patel 				c->irq_claimed[irq_word] &= ~irq_mask;
251*762224e4SAnup Patel 				c->irq_autoclear[irq_word] &= ~irq_mask;
252*762224e4SAnup Patel 			}
253*762224e4SAnup Patel 			__plic_context_irq_update(s, c);
254*762224e4SAnup Patel 			irq_marked = true;
255*762224e4SAnup Patel 		}
256*762224e4SAnup Patel 		mutex_unlock(&c->irq_lock);
257*762224e4SAnup Patel 
258*762224e4SAnup Patel 		if (irq_marked)
259*762224e4SAnup Patel 			break;
260*762224e4SAnup Patel 	}
261*762224e4SAnup Patel 
262*762224e4SAnup Patel done:
263*762224e4SAnup Patel 	mutex_unlock(&s->irq_lock);
264*762224e4SAnup Patel }
265*762224e4SAnup Patel 
266*762224e4SAnup Patel static void plic__priority_read(struct plic_state *s,
267*762224e4SAnup Patel 				u64 offset, void *data)
268*762224e4SAnup Patel {
269*762224e4SAnup Patel 	u32 irq = (offset >> 2);
270*762224e4SAnup Patel 
271*762224e4SAnup Patel 	if (irq == 0 || irq >= s->num_irq)
272*762224e4SAnup Patel 		return;
273*762224e4SAnup Patel 
274*762224e4SAnup Patel 	mutex_lock(&s->irq_lock);
275*762224e4SAnup Patel 	ioport__write32(data, s->irq_priority[irq]);
276*762224e4SAnup Patel 	mutex_unlock(&s->irq_lock);
277*762224e4SAnup Patel }
278*762224e4SAnup Patel 
279*762224e4SAnup Patel static void plic__priority_write(struct plic_state *s,
280*762224e4SAnup Patel 				 u64 offset, void *data)
281*762224e4SAnup Patel {
282*762224e4SAnup Patel 	u32 val, irq = (offset >> 2);
283*762224e4SAnup Patel 
284*762224e4SAnup Patel 	if (irq == 0 || irq >= s->num_irq)
285*762224e4SAnup Patel 		return;
286*762224e4SAnup Patel 
287*762224e4SAnup Patel 	mutex_lock(&s->irq_lock);
288*762224e4SAnup Patel 	val = ioport__read32(data);
289*762224e4SAnup Patel 	val &= ((1 << PRIORITY_PER_ID) - 1);
290*762224e4SAnup Patel 	s->irq_priority[irq] = val;
291*762224e4SAnup Patel 	mutex_unlock(&s->irq_lock);
292*762224e4SAnup Patel }
293*762224e4SAnup Patel 
294*762224e4SAnup Patel static void plic__context_enable_read(struct plic_state *s,
295*762224e4SAnup Patel 				      struct plic_context *c,
296*762224e4SAnup Patel 				      u64 offset, void *data)
297*762224e4SAnup Patel {
298*762224e4SAnup Patel 	u32 irq_word = offset >> 2;
299*762224e4SAnup Patel 
300*762224e4SAnup Patel 	if (s->num_irq_word < irq_word)
301*762224e4SAnup Patel 		return;
302*762224e4SAnup Patel 
303*762224e4SAnup Patel 	mutex_lock(&c->irq_lock);
304*762224e4SAnup Patel 	ioport__write32(data, c->irq_enable[irq_word]);
305*762224e4SAnup Patel 	mutex_unlock(&c->irq_lock);
306*762224e4SAnup Patel }
307*762224e4SAnup Patel 
308*762224e4SAnup Patel static void plic__context_enable_write(struct plic_state *s,
309*762224e4SAnup Patel 				       struct plic_context *c,
310*762224e4SAnup Patel 				       u64 offset, void *data)
311*762224e4SAnup Patel {
312*762224e4SAnup Patel 	u8 irq_prio;
313*762224e4SAnup Patel 	u32 i, irq, irq_mask;
314*762224e4SAnup Patel 	u32 irq_word = offset >> 2;
315*762224e4SAnup Patel 	u32 old_val, new_val, xor_val;
316*762224e4SAnup Patel 
317*762224e4SAnup Patel 	if (s->num_irq_word < irq_word)
318*762224e4SAnup Patel 		return;
319*762224e4SAnup Patel 
320*762224e4SAnup Patel 	mutex_lock(&s->irq_lock);
321*762224e4SAnup Patel 
322*762224e4SAnup Patel 	mutex_lock(&c->irq_lock);
323*762224e4SAnup Patel 
324*762224e4SAnup Patel 	old_val = c->irq_enable[irq_word];
325*762224e4SAnup Patel 	new_val = ioport__read32(data);
326*762224e4SAnup Patel 
327*762224e4SAnup Patel 	if (irq_word == 0)
328*762224e4SAnup Patel 		new_val &= ~0x1;
329*762224e4SAnup Patel 
330*762224e4SAnup Patel 	c->irq_enable[irq_word] = new_val;
331*762224e4SAnup Patel 
332*762224e4SAnup Patel 	xor_val = old_val ^ new_val;
333*762224e4SAnup Patel 	for (i = 0; i < 32; i++) {
334*762224e4SAnup Patel 		irq = irq_word * 32 + i;
335*762224e4SAnup Patel 		irq_mask = 1 << i;
336*762224e4SAnup Patel 		irq_prio = s->irq_priority[irq];
337*762224e4SAnup Patel 		if (!(xor_val & irq_mask))
338*762224e4SAnup Patel 			continue;
339*762224e4SAnup Patel 		if ((new_val & irq_mask) &&
340*762224e4SAnup Patel 		    (s->irq_level[irq_word] & irq_mask)) {
341*762224e4SAnup Patel 			c->irq_pending[irq_word] |= irq_mask;
342*762224e4SAnup Patel 			c->irq_pending_priority[irq] = irq_prio;
343*762224e4SAnup Patel 		} else if (!(new_val & irq_mask)) {
344*762224e4SAnup Patel 			c->irq_pending[irq_word] &= ~irq_mask;
345*762224e4SAnup Patel 			c->irq_pending_priority[irq] = 0;
346*762224e4SAnup Patel 			c->irq_claimed[irq_word] &= ~irq_mask;
347*762224e4SAnup Patel 		}
348*762224e4SAnup Patel 	}
349*762224e4SAnup Patel 
350*762224e4SAnup Patel 	__plic_context_irq_update(s, c);
351*762224e4SAnup Patel 
352*762224e4SAnup Patel 	mutex_unlock(&c->irq_lock);
353*762224e4SAnup Patel 
354*762224e4SAnup Patel 	mutex_unlock(&s->irq_lock);
355*762224e4SAnup Patel }
356*762224e4SAnup Patel 
357*762224e4SAnup Patel static void plic__context_read(struct plic_state *s,
358*762224e4SAnup Patel 			       struct plic_context *c,
359*762224e4SAnup Patel 			       u64 offset, void *data)
360*762224e4SAnup Patel {
361*762224e4SAnup Patel 	mutex_lock(&c->irq_lock);
362*762224e4SAnup Patel 
363*762224e4SAnup Patel 	switch (offset) {
364*762224e4SAnup Patel 	case CONTEXT_THRESHOLD:
365*762224e4SAnup Patel 		ioport__write32(data, c->irq_priority_threshold);
366*762224e4SAnup Patel 		break;
367*762224e4SAnup Patel 	case CONTEXT_CLAIM:
368*762224e4SAnup Patel 		ioport__write32(data, __plic_context_irq_claim(s, c));
369*762224e4SAnup Patel 		break;
370*762224e4SAnup Patel 	default:
371*762224e4SAnup Patel 		break;
372*762224e4SAnup Patel 	};
373*762224e4SAnup Patel 
374*762224e4SAnup Patel 	mutex_unlock(&c->irq_lock);
375*762224e4SAnup Patel }
376*762224e4SAnup Patel 
377*762224e4SAnup Patel static void plic__context_write(struct plic_state *s,
378*762224e4SAnup Patel 				struct plic_context *c,
379*762224e4SAnup Patel 				u64 offset, void *data)
380*762224e4SAnup Patel {
381*762224e4SAnup Patel 	u32 val, irq_word, irq_mask;
382*762224e4SAnup Patel 	bool irq_update = false;
383*762224e4SAnup Patel 
384*762224e4SAnup Patel 	mutex_lock(&c->irq_lock);
385*762224e4SAnup Patel 
386*762224e4SAnup Patel 	switch (offset) {
387*762224e4SAnup Patel 	case CONTEXT_THRESHOLD:
388*762224e4SAnup Patel 		val = ioport__read32(data);
389*762224e4SAnup Patel 		val &= ((1 << PRIORITY_PER_ID) - 1);
390*762224e4SAnup Patel 		if (val <= s->max_prio)
391*762224e4SAnup Patel 			c->irq_priority_threshold = val;
392*762224e4SAnup Patel 		else
393*762224e4SAnup Patel 			irq_update = true;
394*762224e4SAnup Patel 		break;
395*762224e4SAnup Patel 	case CONTEXT_CLAIM:
396*762224e4SAnup Patel 		val = ioport__read32(data);
397*762224e4SAnup Patel 		irq_word = val / 32;
398*762224e4SAnup Patel 		irq_mask = 1 << (val % 32);
399*762224e4SAnup Patel 		if ((val < plic.num_irq) &&
400*762224e4SAnup Patel 		    (c->irq_enable[irq_word] & irq_mask)) {
401*762224e4SAnup Patel 			c->irq_claimed[irq_word] &= ~irq_mask;
402*762224e4SAnup Patel 			irq_update = true;
403*762224e4SAnup Patel 		}
404*762224e4SAnup Patel 		break;
405*762224e4SAnup Patel 	default:
406*762224e4SAnup Patel 		irq_update = true;
407*762224e4SAnup Patel 		break;
408*762224e4SAnup Patel 	};
409*762224e4SAnup Patel 
410*762224e4SAnup Patel 	if (irq_update)
411*762224e4SAnup Patel 		__plic_context_irq_update(s, c);
412*762224e4SAnup Patel 
413*762224e4SAnup Patel 	mutex_unlock(&c->irq_lock);
414*762224e4SAnup Patel }
415*762224e4SAnup Patel 
416*762224e4SAnup Patel static void plic__mmio_callback(struct kvm_cpu *vcpu,
417*762224e4SAnup Patel 				u64 addr, u8 *data, u32 len,
418*762224e4SAnup Patel 				u8 is_write, void *ptr)
419*762224e4SAnup Patel {
420*762224e4SAnup Patel 	u32 cntx;
421*762224e4SAnup Patel 	struct plic_state *s = ptr;
422*762224e4SAnup Patel 
423*762224e4SAnup Patel 	if (len != 4)
424*762224e4SAnup Patel 		die("plic: invalid len=%d", len);
425*762224e4SAnup Patel 
426*762224e4SAnup Patel 	addr &= ~0x3;
427*762224e4SAnup Patel 	addr -= RISCV_PLIC;
428*762224e4SAnup Patel 
429*762224e4SAnup Patel 	if (is_write) {
430*762224e4SAnup Patel 		if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) {
431*762224e4SAnup Patel 			plic__priority_write(s, addr, data);
432*762224e4SAnup Patel 		} else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) {
433*762224e4SAnup Patel 			cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART;
434*762224e4SAnup Patel 			addr -= cntx * ENABLE_PER_HART + ENABLE_BASE;
435*762224e4SAnup Patel 			if (cntx < s->num_context)
436*762224e4SAnup Patel 				plic__context_enable_write(s,
437*762224e4SAnup Patel 							   &s->contexts[cntx],
438*762224e4SAnup Patel 							   addr, data);
439*762224e4SAnup Patel 		} else if (CONTEXT_BASE <= addr && addr < REG_SIZE) {
440*762224e4SAnup Patel 			cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART;
441*762224e4SAnup Patel 			addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE;
442*762224e4SAnup Patel 			if (cntx < s->num_context)
443*762224e4SAnup Patel 				plic__context_write(s, &s->contexts[cntx],
444*762224e4SAnup Patel 						    addr, data);
445*762224e4SAnup Patel 		}
446*762224e4SAnup Patel 	} else {
447*762224e4SAnup Patel 		if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) {
448*762224e4SAnup Patel 			plic__priority_read(s, addr, data);
449*762224e4SAnup Patel 		} else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) {
450*762224e4SAnup Patel 			cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART;
451*762224e4SAnup Patel 			addr -= cntx * ENABLE_PER_HART + ENABLE_BASE;
452*762224e4SAnup Patel 			if (cntx < s->num_context)
453*762224e4SAnup Patel 				plic__context_enable_read(s,
454*762224e4SAnup Patel 							  &s->contexts[cntx],
455*762224e4SAnup Patel 							  addr, data);
456*762224e4SAnup Patel 		} else if (CONTEXT_BASE <= addr && addr < REG_SIZE) {
457*762224e4SAnup Patel 			cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART;
458*762224e4SAnup Patel 			addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE;
459*762224e4SAnup Patel 			if (cntx < s->num_context)
460*762224e4SAnup Patel 				plic__context_read(s, &s->contexts[cntx],
461*762224e4SAnup Patel 						   addr, data);
462*762224e4SAnup Patel 		}
463*762224e4SAnup Patel 	}
464*762224e4SAnup Patel }
465*762224e4SAnup Patel 
466*762224e4SAnup Patel static int plic__init(struct kvm *kvm)
467*762224e4SAnup Patel {
468*762224e4SAnup Patel 	u32 i;
469*762224e4SAnup Patel 	int ret;
470*762224e4SAnup Patel 	struct plic_context *c;
471*762224e4SAnup Patel 
472*762224e4SAnup Patel 	plic.kvm = kvm;
473*762224e4SAnup Patel 	plic.dev_hdr = (struct device_header) {
474*762224e4SAnup Patel 		.bus_type	= DEVICE_BUS_MMIO,
475*762224e4SAnup Patel 	};
476*762224e4SAnup Patel 
477*762224e4SAnup Patel 	plic.num_irq = MAX_DEVICES;
478*762224e4SAnup Patel 	plic.num_irq_word = plic.num_irq / 32;
479*762224e4SAnup Patel 	if ((plic.num_irq_word * 32) < plic.num_irq)
480*762224e4SAnup Patel 		plic.num_irq_word++;
481*762224e4SAnup Patel 	plic.max_prio = (1UL << PRIORITY_PER_ID) - 1;
482*762224e4SAnup Patel 
483*762224e4SAnup Patel 	plic.num_context = kvm->nrcpus * 2;
484*762224e4SAnup Patel 	plic.contexts = calloc(plic.num_context, sizeof(struct plic_context));
485*762224e4SAnup Patel 	if (!plic.contexts)
486*762224e4SAnup Patel 		return -ENOMEM;
487*762224e4SAnup Patel 	for (i = 0; i < plic.num_context; i++) {
488*762224e4SAnup Patel 		c = &plic.contexts[i];
489*762224e4SAnup Patel 		c->s = &plic;
490*762224e4SAnup Patel 		c->num = i;
491*762224e4SAnup Patel 		c->vcpu = kvm->cpus[i / 2];
492*762224e4SAnup Patel 		mutex_init(&c->irq_lock);
493*762224e4SAnup Patel 	}
494*762224e4SAnup Patel 
495*762224e4SAnup Patel 	mutex_init(&plic.irq_lock);
496*762224e4SAnup Patel 
497*762224e4SAnup Patel 	ret = kvm__register_mmio(kvm, RISCV_PLIC, RISCV_PLIC_SIZE,
498*762224e4SAnup Patel 				 false, plic__mmio_callback, &plic);
499*762224e4SAnup Patel 	if (ret)
500*762224e4SAnup Patel 		return ret;
501*762224e4SAnup Patel 
502*762224e4SAnup Patel 	ret = device__register(&plic.dev_hdr);
503*762224e4SAnup Patel 	if (ret)
504*762224e4SAnup Patel 		return ret;
505*762224e4SAnup Patel 
506*762224e4SAnup Patel 	plic.ready = true;
507*762224e4SAnup Patel 
508*762224e4SAnup Patel 	return 0;
509*762224e4SAnup Patel 
510*762224e4SAnup Patel }
511*762224e4SAnup Patel dev_init(plic__init);
512*762224e4SAnup Patel 
513*762224e4SAnup Patel static int plic__exit(struct kvm *kvm)
514*762224e4SAnup Patel {
515*762224e4SAnup Patel 	plic.ready = false;
516*762224e4SAnup Patel 	kvm__deregister_mmio(kvm, RISCV_PLIC);
517*762224e4SAnup Patel 	free(plic.contexts);
518*762224e4SAnup Patel 
519*762224e4SAnup Patel 	return 0;
520*762224e4SAnup Patel }
521*762224e4SAnup Patel dev_exit(plic__exit);
522