xref: /kvmtool/riscv/plic.c (revision 0dff350174f8da2b65dd43a3e569e5377f4ee906)

#include "kvm/devices.h"
#include "kvm/fdt.h"
#include "kvm/ioeventfd.h"
#include "kvm/ioport.h"
#include "kvm/kvm.h"
#include "kvm/kvm-cpu.h"
#include "kvm/irq.h"
#include "kvm/mutex.h"

#include <linux/byteorder.h>
#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/sizes.h>

/*
 * From the RISC-V Privileged Spec v1.10:
 *
 * Global interrupt sources are assigned small unsigned integer identifiers,
 * beginning at the value 1.  An interrupt ID of 0 is reserved to mean no
 * interrupt.  Interrupt identifiers are also used to break ties when two or
 * more interrupt sources have the same assigned priority. Smaller values of
 * interrupt ID take precedence over larger values of interrupt ID.
 *
 * While the RISC-V supervisor spec doesn't define the maximum number of
 * devices supported by the PLIC, the largest number supported by devices
 * marked as 'riscv,plic0' (which is the only device type this driver supports,
 * and is the only extant PLIC as of now) is 1024.  As mentioned above, device
 * 0 is defined to be non-existent so this device really only supports 1023
 * devices.
 */

#define MAX_DEVICES	1024
#define MAX_CONTEXTS	15872

/*
 * The PLIC consists of memory-mapped control registers, with a memory map as
 * follows:
 *
 * base + 0x000000: Reserved (interrupt source 0 does not exist)
 * base + 0x000004: Interrupt source 1 priority
 * base + 0x000008: Interrupt source 2 priority
 * ...
 * base + 0x000FFC: Interrupt source 1023 priority
 * base + 0x001000: Pending bits for sources 0-31
 * ...
 * base + 0x00107C: Pending bits for sources 992-1023
 * base + 0x001080: Reserved
 * ...
 * base + 0x001FFC: Reserved
 * base + 0x002000: Enable bits for sources 0-31 on context 0
 * base + 0x002004: Enable bits for sources 32-63 on context 0
 * ...
 * base + 0x00207C: Enable bits for sources 992-1023 on context 0
 * base + 0x002080: Enable bits for sources 0-31 on context 1
 * ...
 * base + 0x002100: Enable bits for sources 0-31 on context 2
 * ...
 * base + 0x1F1F80: Enable bits for sources 0-31 on context 15871
 * ...
 * base + 0x1F1FFC: Enable bits for sources 992-1023 on context 15871
 * base + 0x1F2000: Reserved
 * ...		    (higher context IDs would fit here, but wouldn't fit
 *		     inside the per-context priority vector)
 * base + 0x1FFFFC: Reserved
 * base + 0x200000: Priority threshold for context 0
 * base + 0x200004: Claim/complete for context 0
 * base + 0x200008: Reserved
 * ...
 * base + 0x200FFC: Reserved
 * base + 0x201000: Priority threshold for context 1
 * base + 0x201004: Claim/complete for context 1
 * ...
 * base + 0xFFF000: Priority threshold for context 3583
 * base + 0xFFF004: Claim/complete for context 3583
 * base + 0xFFF008: Reserved
 * ...
 * base + 0xFFFFFC: Reserved
 */

/* Each interrupt source has a priority register associated with it. */
#define PRIORITY_BASE		0
#define PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE		0x2000
#define ENABLE_PER_HART		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE		0x200000
#define CONTEXT_PER_HART	0x1000
#define CONTEXT_THRESHOLD	0
#define CONTEXT_CLAIM		4

#define REG_SIZE		0x1000000

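/*
 * Worked example of the layout above (a sketch for illustration; this
 * helper is hypothetical and not used by the driver): the enable word
 * covering interrupt source 'irq' for context 'cntx' sits at offset
 * ENABLE_BASE + cntx * ENABLE_PER_HART + (irq / 32) * 4, bit irq % 32.
 * For instance, source 35 on context 1 is bit 3 of the word at 0x2084.
 */
static inline u64 plic__example_enable_offset(u32 cntx, u32 irq)
{
	return ENABLE_BASE + (u64)cntx * ENABLE_PER_HART + (irq / 32) * 4;
}
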
struct plic_state;

struct plic_context {
	/* State to which this belongs */
	struct plic_state *s;

	/* Static Configuration */
	u32 num;
	struct kvm_cpu *vcpu;

	/* Local IRQ state */
	struct mutex irq_lock;
	u8 irq_priority_threshold;
	u32 irq_enable[MAX_DEVICES/32];
	u32 irq_pending[MAX_DEVICES/32];
	u8 irq_pending_priority[MAX_DEVICES];
	u32 irq_claimed[MAX_DEVICES/32];
	u32 irq_autoclear[MAX_DEVICES/32];
};

struct plic_state {
	bool ready;
	struct kvm *kvm;

	/* Static Configuration */
	u32 num_irq;
	u32 num_irq_word;
	u32 max_prio;

	/* Context Array */
	u32 num_context;
	struct plic_context *contexts;

	/* Global IRQ state */
	struct mutex irq_lock;
	u8 irq_priority[MAX_DEVICES];
	u32 irq_level[MAX_DEVICES/32];
};

static struct plic_state plic;

/* Note: Must be called with c->irq_lock held */
static u32 __plic_context_best_pending_irq(struct plic_state *s,
					   struct plic_context *c)
{
	u8 best_irq_prio = 0;
	u32 i, j, irq, best_irq = 0;

	for (i = 0; i < s->num_irq_word; i++) {
		if (!c->irq_pending[i])
			continue;

		for (j = 0; j < 32; j++) {
			irq = i * 32 + j;
			if ((s->num_irq <= irq) ||
			    !(c->irq_pending[i] & (1 << j)) ||
			    (c->irq_claimed[i] & (1 << j)))
				continue;

			/*
			 * The strict '<' keeps the lowest-numbered IRQ on a
			 * priority tie, matching the spec rule that smaller
			 * IDs take precedence.
			 */
			if (!best_irq ||
			    (best_irq_prio < c->irq_pending_priority[irq])) {
				best_irq = irq;
				best_irq_prio = c->irq_pending_priority[irq];
			}
		}
	}

	return best_irq;
}

/* Note: Must be called with c->irq_lock held */
static void __plic_context_irq_update(struct plic_state *s,
				      struct plic_context *c)
{
	u32 best_irq = __plic_context_best_pending_irq(s, c);
	u32 virq = (best_irq) ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

	/*
	 * Assert or withdraw the vcpu's external interrupt depending on
	 * whether anything claimable is still pending for this context.
	 */
	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
		pr_warning("KVM_INTERRUPT failed");
}

/* Note: Must be called with c->irq_lock held */
static u32 __plic_context_irq_claim(struct plic_state *s,
				    struct plic_context *c)
{
	u32 virq = KVM_INTERRUPT_UNSET;
	u32 best_irq = __plic_context_best_pending_irq(s, c);
	u32 best_irq_word = best_irq / 32;
	u32 best_irq_mask = (1 << (best_irq % 32));

	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
		pr_warning("KVM_INTERRUPT failed");

	if (best_irq) {
		/*
		 * Edge-triggered sources were flagged for auto-clear when
		 * injected, so claiming one consumes it immediately instead
		 * of leaving it marked claimed until completion.
		 */
		if (c->irq_autoclear[best_irq_word] & best_irq_mask) {
			c->irq_pending[best_irq_word] &= ~best_irq_mask;
			c->irq_pending_priority[best_irq] = 0;
			c->irq_claimed[best_irq_word] &= ~best_irq_mask;
			c->irq_autoclear[best_irq_word] &= ~best_irq_mask;
		} else
			c->irq_claimed[best_irq_word] |= best_irq_mask;
	}

	__plic_context_irq_update(s, c);

	return best_irq;
}

static void plic__irq_trig(struct kvm *kvm, int irq, int level, bool edge)
{
	bool irq_marked = false;
	u8 i, irq_prio, irq_word;
	u32 irq_mask;
	struct plic_context *c = NULL;
	struct plic_state *s = &plic;

	if (!s->ready)
		return;

	mutex_lock(&s->irq_lock);

	if (irq <= 0 || s->num_irq <= (u32)irq)
		goto done;

	irq_prio = s->irq_priority[irq];
	irq_word = irq / 32;
	irq_mask = 1 << (irq % 32);

	if (level)
		s->irq_level[irq_word] |= irq_mask;
	else
		s->irq_level[irq_word] &= ~irq_mask;

	/*
	 * Note: PLIC interrupts are level-triggered. As of now,
	 * there is no notion of edge-triggered interrupts. To
	 * handle this we auto-clear edge-triggered interrupts
	 * when the PLIC context CLAIM register is read.
	 */
	for (i = 0; i < s->num_context; i++) {
		c = &s->contexts[i];

		mutex_lock(&c->irq_lock);
		if (c->irq_enable[irq_word] & irq_mask) {
			if (level) {
				c->irq_pending[irq_word] |= irq_mask;
				c->irq_pending_priority[irq] = irq_prio;
				if (edge)
					c->irq_autoclear[irq_word] |= irq_mask;
			} else {
				c->irq_pending[irq_word] &= ~irq_mask;
				c->irq_pending_priority[irq] = 0;
				c->irq_claimed[irq_word] &= ~irq_mask;
				c->irq_autoclear[irq_word] &= ~irq_mask;
			}
			__plic_context_irq_update(s, c);
			irq_marked = true;
		}
		mutex_unlock(&c->irq_lock);

		if (irq_marked)
			break;
	}

done:
	mutex_unlock(&s->irq_lock);
}
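
/*
 * Minimal usage sketch (illustrative, not used by the driver): a device
 * model asserts and deasserts its line through kvmtool's generic helper
 * kvm__irq_line(); on RISC-V that call is expected to reach
 * plic__irq_trig() through the riscv_irqchip_trigger hook installed by
 * plic__create() below.
 */
static inline void plic__example_pulse_line(struct kvm *kvm, int irq)
{
	kvm__irq_line(kvm, irq, 1);	/* raise the level */
	kvm__irq_line(kvm, irq, 0);	/* drop it again */
}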

static void plic__priority_read(struct plic_state *s,
				u64 offset, void *data)
{
	u32 irq = (offset >> 2);

	if (irq == 0 || irq >= s->num_irq)
		return;

	mutex_lock(&s->irq_lock);
	ioport__write32(data, s->irq_priority[irq]);
	mutex_unlock(&s->irq_lock);
}

static void plic__priority_write(struct plic_state *s,
				 u64 offset, void *data)
{
	u32 val, irq = (offset >> 2);

	if (irq == 0 || irq >= s->num_irq)
		return;

	mutex_lock(&s->irq_lock);
	val = ioport__read32(data);
	val &= ((1 << PRIORITY_PER_ID) - 1);
	s->irq_priority[irq] = val;
	mutex_unlock(&s->irq_lock);
}

static void plic__context_enable_read(struct plic_state *s,
				      struct plic_context *c,
				      u64 offset, void *data)
{
	u32 irq_word = offset >> 2;

	if (irq_word >= s->num_irq_word)
		return;

	mutex_lock(&c->irq_lock);
	ioport__write32(data, c->irq_enable[irq_word]);
	mutex_unlock(&c->irq_lock);
}

static void plic__context_enable_write(struct plic_state *s,
				       struct plic_context *c,
				       u64 offset, void *data)
{
	u8 irq_prio;
	u32 i, irq, irq_mask;
	u32 irq_word = offset >> 2;
	u32 old_val, new_val, xor_val;

	if (irq_word >= s->num_irq_word)
		return;

	mutex_lock(&s->irq_lock);

	mutex_lock(&c->irq_lock);

	old_val = c->irq_enable[irq_word];
	new_val = ioport__read32(data);

	/* Interrupt source 0 does not exist, so its enable bit stays clear. */
	if (irq_word == 0)
		new_val &= ~0x1;

	c->irq_enable[irq_word] = new_val;

	/*
	 * Resync pending state for every source whose enable bit changed:
	 * newly enabled sources whose level is high become pending, newly
	 * disabled sources are cleared.
	 */
	xor_val = old_val ^ new_val;
	for (i = 0; i < 32; i++) {
		irq = irq_word * 32 + i;
		irq_mask = 1 << i;
		irq_prio = s->irq_priority[irq];
		if (!(xor_val & irq_mask))
			continue;
		if ((new_val & irq_mask) &&
		    (s->irq_level[irq_word] & irq_mask)) {
			c->irq_pending[irq_word] |= irq_mask;
			c->irq_pending_priority[irq] = irq_prio;
		} else if (!(new_val & irq_mask)) {
			c->irq_pending[irq_word] &= ~irq_mask;
			c->irq_pending_priority[irq] = 0;
			c->irq_claimed[irq_word] &= ~irq_mask;
		}
	}

	__plic_context_irq_update(s, c);

	mutex_unlock(&c->irq_lock);

	mutex_unlock(&s->irq_lock);
}

static void plic__context_read(struct plic_state *s,
			       struct plic_context *c,
			       u64 offset, void *data)
{
	mutex_lock(&c->irq_lock);

	switch (offset) {
	case CONTEXT_THRESHOLD:
		ioport__write32(data, c->irq_priority_threshold);
		break;
	case CONTEXT_CLAIM:
		ioport__write32(data, __plic_context_irq_claim(s, c));
		break;
	default:
		break;
	}

	mutex_unlock(&c->irq_lock);
}

static void plic__context_write(struct plic_state *s,
				struct plic_context *c,
				u64 offset, void *data)
{
	u32 val, irq_word, irq_mask;
	bool irq_update = false;

	mutex_lock(&c->irq_lock);

	switch (offset) {
	case CONTEXT_THRESHOLD:
		val = ioport__read32(data);
		val &= ((1 << PRIORITY_PER_ID) - 1);
		if (val <= s->max_prio)
			c->irq_priority_threshold = val;
		else
			irq_update = true;
		break;
	case CONTEXT_CLAIM:
		/* A write to claim/complete signals completion of IRQ 'val'. */
		val = ioport__read32(data);
		irq_word = val / 32;
		irq_mask = 1 << (val % 32);
		if ((val < plic.num_irq) &&
		    (c->irq_enable[irq_word] & irq_mask)) {
			c->irq_claimed[irq_word] &= ~irq_mask;
			irq_update = true;
		}
		break;
	default:
		irq_update = true;
		break;
	}

	if (irq_update)
		__plic_context_irq_update(s, c);

	mutex_unlock(&c->irq_lock);
}
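
/*
 * For reference, the guest-side handshake these handlers model (the
 * standard PLIC claim/complete flow): the supervisor trap handler reads
 * CONTEXT_CLAIM to claim the best pending source, services the device,
 * then writes the same ID back to CONTEXT_CLAIM to complete it, after
 * which the source may be raised again.
 */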

static void plic__mmio_callback(struct kvm_cpu *vcpu,
				u64 addr, u8 *data, u32 len,
				u8 is_write, void *ptr)
{
	u32 cntx;
	struct plic_state *s = ptr;

	if (len != 4)
		die("plic: invalid len=%d", len);

	addr &= ~0x3;
	addr -= RISCV_IRQCHIP;

	if (is_write) {
		if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) {
			plic__priority_write(s, addr, data);
		} else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) {
			cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART;
			addr -= cntx * ENABLE_PER_HART + ENABLE_BASE;
			if (cntx < s->num_context)
				plic__context_enable_write(s,
							   &s->contexts[cntx],
							   addr, data);
		} else if (CONTEXT_BASE <= addr && addr < REG_SIZE) {
			cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART;
			addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE;
			if (cntx < s->num_context)
				plic__context_write(s, &s->contexts[cntx],
						    addr, data);
		}
	} else {
		if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) {
			plic__priority_read(s, addr, data);
		} else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) {
			cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART;
			addr -= cntx * ENABLE_PER_HART + ENABLE_BASE;
			if (cntx < s->num_context)
				plic__context_enable_read(s,
							  &s->contexts[cntx],
							  addr, data);
		} else if (CONTEXT_BASE <= addr && addr < REG_SIZE) {
			cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART;
			addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE;
			if (cntx < s->num_context)
				plic__context_read(s, &s->contexts[cntx],
						   addr, data);
		}
	}
}
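
/*
 * Decoding example for the callback above (illustration only): a 32-bit
 * access at guest address RISCV_IRQCHIP + 0x201000 becomes offset
 * 0x201000, which falls in the context region, giving cntx = 1 and a
 * per-context offset of CONTEXT_THRESHOLD (0), i.e. the priority
 * threshold register of context 1.
 */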

static void plic__generate_fdt_node(void *fdt, struct kvm *kvm)
{
	u32 i;
	char name[64];
	u32 reg_cells[4], *irq_cells;

	reg_cells[0] = 0;
	reg_cells[1] = cpu_to_fdt32(RISCV_IRQCHIP);
	reg_cells[2] = 0;
	reg_cells[3] = cpu_to_fdt32(RISCV_IRQCHIP_SIZE);

	irq_cells = calloc(plic.num_context * 2, sizeof(u32));
	if (!irq_cells)
		die("Failed to alloc irq_cells");

	sprintf(name, "interrupt-controller@%08x", (u32)RISCV_IRQCHIP);
	_FDT(fdt_begin_node(fdt, name));
	_FDT(fdt_property_string(fdt, "compatible", "riscv,plic0"));
	_FDT(fdt_property(fdt, "reg", reg_cells, sizeof(reg_cells)));
	_FDT(fdt_property_cell(fdt, "#interrupt-cells", 1));
	_FDT(fdt_property(fdt, "interrupt-controller", NULL, 0));
	_FDT(fdt_property_cell(fdt, "riscv,max-priority", plic.max_prio));
	_FDT(fdt_property_cell(fdt, "riscv,ndev", MAX_DEVICES - 1));
	_FDT(fdt_property_cell(fdt, "phandle", PHANDLE_PLIC));
	for (i = 0; i < (plic.num_context / 2); i++) {
		/* M-mode context of vcpu 'i': 0xffffffff marks it unused. */
		irq_cells[4*i + 0] = cpu_to_fdt32(PHANDLE_CPU_INTC_BASE + i);
		irq_cells[4*i + 1] = cpu_to_fdt32(0xffffffff);
		/* S-mode context of vcpu 'i': 9 is the S-mode external IRQ. */
		irq_cells[4*i + 2] = cpu_to_fdt32(PHANDLE_CPU_INTC_BASE + i);
		irq_cells[4*i + 3] = cpu_to_fdt32(9);
	}
	_FDT(fdt_property(fdt, "interrupts-extended", irq_cells,
			  sizeof(u32) * plic.num_context * 2));
	_FDT(fdt_end_node(fdt));

	free(irq_cells);
}
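
/*
 * Roughly how the node above renders in DTS form for a single-vcpu guest
 * (two PLIC contexts); addresses, phandles and the 'cpu0_intc' label are
 * placeholders that depend on RISCV_IRQCHIP and the phandle layout:
 *
 *	interrupt-controller@<RISCV_IRQCHIP> {
 *		compatible = "riscv,plic0";
 *		reg = <0x0 RISCV_IRQCHIP 0x0 RISCV_IRQCHIP_SIZE>;
 *		#interrupt-cells = <1>;
 *		interrupt-controller;
 *		riscv,max-priority = <15>;
 *		riscv,ndev = <1023>;
 *		phandle = <PHANDLE_PLIC>;
 *		interrupts-extended = <&cpu0_intc 0xffffffff &cpu0_intc 9>;
 *	};
 */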

static int plic__init(struct kvm *kvm)
{
	u32 i;
	int ret;
	struct plic_context *c;

	if (riscv_irqchip != IRQCHIP_PLIC)
		return 0;

	plic.kvm = kvm;
	plic.num_irq = MAX_DEVICES;
	plic.num_irq_word = plic.num_irq / 32;
	if ((plic.num_irq_word * 32) < plic.num_irq)
		plic.num_irq_word++;
	plic.max_prio = (1UL << PRIORITY_PER_ID) - 1;

	plic.num_context = kvm->nrcpus * 2;
	plic.contexts = calloc(plic.num_context, sizeof(struct plic_context));
	if (!plic.contexts)
		return -ENOMEM;
	for (i = 0; i < plic.num_context; i++) {
		c = &plic.contexts[i];
		c->s = &plic;
		c->num = i;
		c->vcpu = kvm->cpus[i / 2];
		mutex_init(&c->irq_lock);
	}

	mutex_init(&plic.irq_lock);

	ret = kvm__register_mmio(kvm, RISCV_IRQCHIP, RISCV_IRQCHIP_SIZE,
				 false, plic__mmio_callback, &plic);
	if (ret)
		return ret;

	plic.ready = true;

	return 0;
}
dev_init(plic__init);

static int plic__exit(struct kvm *kvm)
{
	if (riscv_irqchip != IRQCHIP_PLIC)
		return 0;

	plic.ready = false;
	kvm__deregister_mmio(kvm, RISCV_IRQCHIP);
	free(plic.contexts);

	return 0;
}
dev_exit(plic__exit);

void plic__create(struct kvm *kvm)
{
	if (riscv_irqchip != IRQCHIP_UNKNOWN)
		return;

	riscv_irqchip = IRQCHIP_PLIC;
	riscv_irqchip_inkernel = false;
	riscv_irqchip_trigger = plic__irq_trig;
	riscv_irqchip_generate_fdt_node = plic__generate_fdt_node;
	riscv_irqchip_phandle = PHANDLE_PLIC;
	riscv_irqchip_msi_phandle = PHANDLE_RESERVED;
	riscv_irqchip_line_sensing = false;
}