xref: /kvmtool/riscv/plic.c (revision f6cc06d6b53540b0c8d9eada04fb6c5bd90e56fd)
1762224e4SAnup Patel 
2762224e4SAnup Patel #include "kvm/devices.h"
37c9aac00SAnup Patel #include "kvm/fdt.h"
4762224e4SAnup Patel #include "kvm/ioeventfd.h"
5762224e4SAnup Patel #include "kvm/ioport.h"
6762224e4SAnup Patel #include "kvm/kvm.h"
7762224e4SAnup Patel #include "kvm/kvm-cpu.h"
8762224e4SAnup Patel #include "kvm/irq.h"
9762224e4SAnup Patel #include "kvm/mutex.h"
10762224e4SAnup Patel 
11762224e4SAnup Patel #include <linux/byteorder.h>
12762224e4SAnup Patel #include <linux/kernel.h>
13762224e4SAnup Patel #include <linux/kvm.h>
14762224e4SAnup Patel #include <linux/sizes.h>
15762224e4SAnup Patel 
16762224e4SAnup Patel /*
 * From the RISC-V Privileged Spec v1.10:
18762224e4SAnup Patel  *
19762224e4SAnup Patel  * Global interrupt sources are assigned small unsigned integer identifiers,
20762224e4SAnup Patel  * beginning at the value 1.  An interrupt ID of 0 is reserved to mean no
21762224e4SAnup Patel  * interrupt.  Interrupt identifiers are also used to break ties when two or
22762224e4SAnup Patel  * more interrupt sources have the same assigned priority. Smaller values of
23762224e4SAnup Patel  * interrupt ID take precedence over larger values of interrupt ID.
24762224e4SAnup Patel  *
25762224e4SAnup Patel  * While the RISC-V supervisor spec doesn't define the maximum number of
26762224e4SAnup Patel  * devices supported by the PLIC, the largest number supported by devices
27762224e4SAnup Patel  * marked as 'riscv,plic0' (which is the only device type this driver supports,
28762224e4SAnup Patel  * and is the only extant PLIC as of now) is 1024.  As mentioned above, device
 * 0 is defined to be non-existent so this device really only supports 1023
30762224e4SAnup Patel  * devices.
31762224e4SAnup Patel  */
32762224e4SAnup Patel 
/* Interrupt source IDs run 0..1023; ID 0 is reserved (see comment above) */
#define MAX_DEVICES	1024
/* Largest context count that still fits the register map described below */
#define MAX_CONTEXTS	15872
35762224e4SAnup Patel 
36762224e4SAnup Patel /*
37762224e4SAnup Patel  * The PLIC consists of memory-mapped control registers, with a memory map as
38762224e4SAnup Patel  * follows:
39762224e4SAnup Patel  *
40762224e4SAnup Patel  * base + 0x000000: Reserved (interrupt source 0 does not exist)
41762224e4SAnup Patel  * base + 0x000004: Interrupt source 1 priority
42762224e4SAnup Patel  * base + 0x000008: Interrupt source 2 priority
43762224e4SAnup Patel  * ...
44762224e4SAnup Patel  * base + 0x000FFC: Interrupt source 1023 priority
45762224e4SAnup Patel  * base + 0x001000: Pending 0
46762224e4SAnup Patel  * base + 0x001FFF: Pending
47762224e4SAnup Patel  * base + 0x002000: Enable bits for sources 0-31 on context 0
48762224e4SAnup Patel  * base + 0x002004: Enable bits for sources 32-63 on context 0
49762224e4SAnup Patel  * ...
 * base + 0x00207C: Enable bits for sources 992-1023 on context 0
51762224e4SAnup Patel  * base + 0x002080: Enable bits for sources 0-31 on context 1
52762224e4SAnup Patel  * ...
53762224e4SAnup Patel  * base + 0x002100: Enable bits for sources 0-31 on context 2
54762224e4SAnup Patel  * ...
55762224e4SAnup Patel  * base + 0x1F1F80: Enable bits for sources 992-1023 on context 15871
56762224e4SAnup Patel  * base + 0x1F1F84: Reserved
57762224e4SAnup Patel  * ...		    (higher context IDs would fit here, but wouldn't fit
58762224e4SAnup Patel  *		     inside the per-context priority vector)
59762224e4SAnup Patel  * base + 0x1FFFFC: Reserved
60762224e4SAnup Patel  * base + 0x200000: Priority threshold for context 0
61762224e4SAnup Patel  * base + 0x200004: Claim/complete for context 0
62762224e4SAnup Patel  * base + 0x200008: Reserved
63762224e4SAnup Patel  * ...
64762224e4SAnup Patel  * base + 0x200FFC: Reserved
65762224e4SAnup Patel  * base + 0x201000: Priority threshold for context 1
66762224e4SAnup Patel  * base + 0x201004: Claim/complete for context 1
67762224e4SAnup Patel  * ...
68762224e4SAnup Patel  * base + 0xFFE000: Priority threshold for context 15871
69762224e4SAnup Patel  * base + 0xFFE004: Claim/complete for context 15871
70762224e4SAnup Patel  * base + 0xFFE008: Reserved
71762224e4SAnup Patel  * ...
72762224e4SAnup Patel  * base + 0xFFFFFC: Reserved
73762224e4SAnup Patel  */
74762224e4SAnup Patel 
/* Each interrupt source has a priority register associated with it. */
#define PRIORITY_BASE		0
#define PRIORITY_PER_ID		4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE		0x2000
#define ENABLE_PER_HART		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE		0x200000
#define CONTEXT_PER_HART	0x1000
#define CONTEXT_THRESHOLD	0
#define CONTEXT_CLAIM		4

/* Total size of the PLIC MMIO window decoded by plic__mmio_callback() */
#define REG_SIZE		0x1000000

/* Irqchip index used in KVM GSI routing entries (plic__irq_routing_init) */
#define IRQCHIP_PLIC_NR		0
99*f6cc06d6SAnup Patel 
struct plic_state;

/* Per-hart-context PLIC state (one enable/pending view per context). */
struct plic_context {
	/* State to which this belongs */
	struct plic_state *s;

	/* Static Configuration */
	u32 num;		/* index of this context in plic_state->contexts */
	struct kvm_cpu *vcpu;	/* VCPU whose external interrupt line we drive */

	/* Local IRQ state */
	struct mutex irq_lock;
	/*
	 * Programmed via the CONTEXT_THRESHOLD register; note that the
	 * delivery code in this file never consults it when picking the
	 * best pending IRQ.
	 */
	u8 irq_priority_threshold;
	u32 irq_enable[MAX_DEVICES/32];		/* per-source enable bits */
	u32 irq_pending[MAX_DEVICES/32];	/* raised, possibly claimed */
	u8 irq_pending_priority[MAX_DEVICES];	/* priority snapshot at raise time */
	u32 irq_claimed[MAX_DEVICES/32];	/* claimed but not yet completed */
	u32 irq_autoclear[MAX_DEVICES/32];	/* edge sources: cleared on claim */
};

/* Global (whole-chip) PLIC state shared by all contexts. */
struct plic_state {
	bool ready;		/* set once plic__init() finished; gates triggers */
	struct kvm *kvm;

	/* Static Configuration */
	u32 num_irq;
	u32 num_irq_word;	/* number of 32-bit words covering num_irq bits */
	u32 max_prio;

	/* Context Array */
	u32 num_context;
	struct plic_context *contexts;

	/* Global IRQ state */
	struct mutex irq_lock;
	u8 irq_priority[MAX_DEVICES];	/* guest-programmed source priorities */
	u32 irq_level[MAX_DEVICES/32];	/* current input level of each source */
};

/* Singleton instance: kvmtool models at most one PLIC per VM. */
static struct plic_state plic;
140762224e4SAnup Patel 
141762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
__plic_context_best_pending_irq(struct plic_state * s,struct plic_context * c)142762224e4SAnup Patel static u32 __plic_context_best_pending_irq(struct plic_state *s,
143762224e4SAnup Patel 					   struct plic_context *c)
144762224e4SAnup Patel {
145762224e4SAnup Patel 	u8 best_irq_prio = 0;
146762224e4SAnup Patel 	u32 i, j, irq, best_irq = 0;
147762224e4SAnup Patel 
148762224e4SAnup Patel 	for (i = 0; i < s->num_irq_word; i++) {
149762224e4SAnup Patel 		if (!c->irq_pending[i])
150762224e4SAnup Patel 			continue;
151762224e4SAnup Patel 
152762224e4SAnup Patel 		for (j = 0; j < 32; j++) {
153762224e4SAnup Patel 			irq = i * 32 + j;
154762224e4SAnup Patel 			if ((s->num_irq <= irq) ||
155762224e4SAnup Patel 			    !(c->irq_pending[i] & (1 << j)) ||
156762224e4SAnup Patel 			    (c->irq_claimed[i] & (1 << j)))
157762224e4SAnup Patel 				continue;
158762224e4SAnup Patel 
159762224e4SAnup Patel 			if (!best_irq ||
160762224e4SAnup Patel 			    (best_irq_prio < c->irq_pending_priority[irq])) {
161762224e4SAnup Patel 				best_irq = irq;
162762224e4SAnup Patel 				best_irq_prio = c->irq_pending_priority[irq];
163762224e4SAnup Patel 			}
164762224e4SAnup Patel 		}
165762224e4SAnup Patel 	}
166762224e4SAnup Patel 
167762224e4SAnup Patel 	return best_irq;
168762224e4SAnup Patel }
169762224e4SAnup Patel 
170762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
__plic_context_irq_update(struct plic_state * s,struct plic_context * c)171762224e4SAnup Patel static void __plic_context_irq_update(struct plic_state *s,
172762224e4SAnup Patel 				      struct plic_context *c)
173762224e4SAnup Patel {
174762224e4SAnup Patel 	u32 best_irq = __plic_context_best_pending_irq(s, c);
175762224e4SAnup Patel 	u32 virq = (best_irq) ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;
176762224e4SAnup Patel 
177762224e4SAnup Patel 	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
178762224e4SAnup Patel 		pr_warning("KVM_INTERRUPT failed");
179762224e4SAnup Patel }
180762224e4SAnup Patel 
181762224e4SAnup Patel /* Note: Must be called with c->irq_lock held */
/*
 * Claim the best pending interrupt for a context and return its ID (0 when
 * none).  Mirrors a real PLIC claim read: the VCPU line is de-asserted,
 * bookkeeping is updated, then the line is re-evaluated in case another
 * interrupt is still deliverable.
 *
 * Edge-triggered sources (marked in irq_autoclear by plic__irq_trig) have
 * all their state dropped at claim time since there is no level to keep
 * pending; level-triggered sources are only marked claimed and remain
 * pending until the guest writes the completion.
 *
 * Note: Must be called with c->irq_lock held.
 */
static u32 __plic_context_irq_claim(struct plic_state *s,
				    struct plic_context *c)
{
	u32 virq = KVM_INTERRUPT_UNSET;
	u32 best_irq = __plic_context_best_pending_irq(s, c);
	u32 best_irq_word = best_irq / 32;
	u32 best_irq_mask = (1 << (best_irq % 32));

	/* De-assert the line first; it is re-asserted below if needed */
	if (ioctl(c->vcpu->vcpu_fd, KVM_INTERRUPT, &virq) < 0)
		pr_warning("KVM_INTERRUPT failed");

	if (best_irq) {
		if (c->irq_autoclear[best_irq_word] & best_irq_mask) {
			/* Edge-triggered: consume the interrupt entirely */
			c->irq_pending[best_irq_word] &= ~best_irq_mask;
			c->irq_pending_priority[best_irq] = 0;
			c->irq_claimed[best_irq_word] &= ~best_irq_mask;
			c->irq_autoclear[best_irq_word] &= ~best_irq_mask;
		} else
			c->irq_claimed[best_irq_word] |= best_irq_mask;
	}

	__plic_context_irq_update(s, c);

	return best_irq;
}
207762224e4SAnup Patel 
/*
 * Update the level of a global interrupt source and propagate it to the
 * first context that has the source enabled (delivery stops after one
 * context, see the irq_marked break below).
 *
 * @irq:   source ID (1..num_irq-1; 0 and out-of-range IDs are ignored)
 * @level: new input level of the source
 * @edge:  treat this trigger as edge-like; such interrupts are auto-cleared
 *         when the guest reads the CLAIM register (see comment below)
 */
static void plic__irq_trig(struct kvm *kvm, int irq, int level, bool edge)
{
	bool irq_marked = false;
	u8 irq_prio;
	/*
	 * 'i' must be wide enough for num_context (2 * nrcpus); the old u8
	 * counter overflowed for more than 127 VCPUs.
	 */
	u32 i, irq_word, irq_mask;
	struct plic_context *c = NULL;
	struct plic_state *s = &plic;

	if (!s->ready)
		return;

	/*
	 * Plain return here: the old "goto done" unlocked s->irq_lock
	 * without ever having locked it (undefined behavior).
	 */
	if (irq <= 0 || s->num_irq <= (u32)irq)
		return;

	mutex_lock(&s->irq_lock);

	irq_prio = s->irq_priority[irq];
	irq_word = irq / 32;
	irq_mask = 1 << (irq % 32);

	if (level)
		s->irq_level[irq_word] |= irq_mask;
	else
		s->irq_level[irq_word] &= ~irq_mask;

	/*
	 * Note: PLIC interrupts are level-triggered. As of now,
	 * there is no notion of edge-triggered interrupts. To
	 * handle this we auto-clear edge-triggered interrupts
	 * when PLIC context CLAIM register is read.
	 */
	for (i = 0; i < s->num_context; i++) {
		c = &s->contexts[i];

		mutex_lock(&c->irq_lock);
		if (c->irq_enable[irq_word] & irq_mask) {
			if (level) {
				c->irq_pending[irq_word] |= irq_mask;
				c->irq_pending_priority[irq] = irq_prio;
				if (edge)
					c->irq_autoclear[irq_word] |= irq_mask;
			} else {
				c->irq_pending[irq_word] &= ~irq_mask;
				c->irq_pending_priority[irq] = 0;
				c->irq_claimed[irq_word] &= ~irq_mask;
				c->irq_autoclear[irq_word] &= ~irq_mask;
			}
			__plic_context_irq_update(s, c);
			irq_marked = true;
		}
		mutex_unlock(&c->irq_lock);

		/* Deliver to at most one context */
		if (irq_marked)
			break;
	}

	mutex_unlock(&s->irq_lock);
}
267762224e4SAnup Patel 
/* Guest read of a single source-priority register. */
static void plic__priority_read(struct plic_state *s,
				u64 offset, void *data)
{
	u32 irq = offset / sizeof(u32);

	/* Source 0 does not exist and out-of-range sources are ignored */
	if (!irq || irq >= s->num_irq)
		return;

	mutex_lock(&s->irq_lock);
	ioport__write32(data, s->irq_priority[irq]);
	mutex_unlock(&s->irq_lock);
}
280762224e4SAnup Patel 
/* Guest write of a single source-priority register. */
static void plic__priority_write(struct plic_state *s,
				 u64 offset, void *data)
{
	u32 prio, irq = offset / sizeof(u32);

	/* Source 0 does not exist and out-of-range sources are ignored */
	if (!irq || irq >= s->num_irq)
		return;

	/* Only the low PRIORITY_PER_ID bits are implemented */
	prio = ioport__read32(data) & ((1 << PRIORITY_PER_ID) - 1);

	mutex_lock(&s->irq_lock);
	s->irq_priority[irq] = prio;
	mutex_unlock(&s->irq_lock);
}
295762224e4SAnup Patel 
/*
 * Guest read of one 32-bit word of a context's interrupt-enable bitmap.
 * @offset is relative to this context's enable area; out-of-range words
 * are silently ignored.
 */
static void plic__context_enable_read(struct plic_state *s,
				      struct plic_context *c,
				      u64 offset, void *data)
{
	u32 irq_word = offset >> 2;

	/*
	 * Valid words are 0..num_irq_word-1.  The old check
	 * "num_irq_word < irq_word" let irq_word == num_irq_word through,
	 * indexing one word past the end of c->irq_enable[] (latent today
	 * because the MMIO decode keeps offset below ENABLE_PER_HART, but
	 * wrong nonetheless).
	 */
	if (irq_word >= s->num_irq_word)
		return;

	mutex_lock(&c->irq_lock);
	ioport__write32(data, c->irq_enable[irq_word]);
	mutex_unlock(&c->irq_lock);
}
309762224e4SAnup Patel 
/*
 * Guest write of one 32-bit word of a context's interrupt-enable bitmap.
 * For every bit that changed, pending state is recomputed from the
 * current source level, then the VCPU line is re-evaluated.
 */
static void plic__context_enable_write(struct plic_state *s,
				       struct plic_context *c,
				       u64 offset, void *data)
{
	u8 irq_prio;
	u32 i, irq, irq_mask;
	u32 irq_word = offset >> 2;
	u32 old_val, new_val, xor_val;

	/*
	 * Valid words are 0..num_irq_word-1.  The old check
	 * "num_irq_word < irq_word" let irq_word == num_irq_word through,
	 * which would read/write one word past the per-context bitmaps.
	 */
	if (irq_word >= s->num_irq_word)
		return;

	mutex_lock(&s->irq_lock);

	mutex_lock(&c->irq_lock);

	old_val = c->irq_enable[irq_word];
	new_val = ioport__read32(data);

	/* Source 0 does not exist; never allow it to be enabled */
	if (irq_word == 0)
		new_val &= ~0x1;

	c->irq_enable[irq_word] = new_val;

	/* Walk only the bits whose enable state actually flipped */
	xor_val = old_val ^ new_val;
	for (i = 0; i < 32; i++) {
		irq = irq_word * 32 + i;
		irq_mask = 1 << i;
		irq_prio = s->irq_priority[irq];
		if (!(xor_val & irq_mask))
			continue;
		if ((new_val & irq_mask) &&
		    (s->irq_level[irq_word] & irq_mask)) {
			/* Newly enabled while the line is high: make pending */
			c->irq_pending[irq_word] |= irq_mask;
			c->irq_pending_priority[irq] = irq_prio;
		} else if (!(new_val & irq_mask)) {
			/* Disabled: drop all local state for the source */
			c->irq_pending[irq_word] &= ~irq_mask;
			c->irq_pending_priority[irq] = 0;
			c->irq_claimed[irq_word] &= ~irq_mask;
		}
	}

	__plic_context_irq_update(s, c);

	mutex_unlock(&c->irq_lock);

	mutex_unlock(&s->irq_lock);
}
358762224e4SAnup Patel 
/*
 * Guest read of a context control register: the priority threshold or the
 * claim register (reading CLAIM performs an actual claim).  Reads of any
 * other offset leave the data buffer untouched.
 */
static void plic__context_read(struct plic_state *s,
			       struct plic_context *c,
			       u64 offset, void *data)
{
	mutex_lock(&c->irq_lock);

	if (offset == CONTEXT_THRESHOLD)
		ioport__write32(data, c->irq_priority_threshold);
	else if (offset == CONTEXT_CLAIM)
		ioport__write32(data, __plic_context_irq_claim(s, c));

	mutex_unlock(&c->irq_lock);
}
378762224e4SAnup Patel 
/*
 * Guest write of a context control register.
 *
 * CONTEXT_THRESHOLD sets the priority threshold (with max_prio == 15 as
 * set in plic__init, the PRIORITY_PER_ID mask already caps val, so the
 * else branch is effectively unreachable there).  CONTEXT_CLAIM is the
 * interrupt-completion write: an enabled, in-range source has its claimed
 * bit dropped so it can be delivered again.  Unknown offsets still force
 * a line re-evaluation.
 */
static void plic__context_write(struct plic_state *s,
				struct plic_context *c,
				u64 offset, void *data)
{
	u32 val, irq_word, irq_mask;
	bool irq_update = false;

	mutex_lock(&c->irq_lock);

	switch (offset) {
	case CONTEXT_THRESHOLD:
		val = ioport__read32(data);
		val &= ((1 << PRIORITY_PER_ID) - 1);
		if (val <= s->max_prio)
			c->irq_priority_threshold = val;
		else
			irq_update = true;
		break;
	case CONTEXT_CLAIM:
		/* Completion: val is the source ID being completed */
		val = ioport__read32(data);
		irq_word = val / 32;
		irq_mask = 1 << (val % 32);
		if ((val < plic.num_irq) &&
		    (c->irq_enable[irq_word] & irq_mask)) {
			c->irq_claimed[irq_word] &= ~irq_mask;
			irq_update = true;
		}
		break;
	default:
		irq_update = true;
		break;
	};

	if (irq_update)
		__plic_context_irq_update(s, c);

	mutex_unlock(&c->irq_lock);
}
417762224e4SAnup Patel 
/*
 * Dispatch a guest MMIO access inside the PLIC window to the matching
 * register-region handler.  Only aligned 32-bit accesses are supported;
 * accesses to unknown regions or out-of-range contexts are ignored.
 */
static void plic__mmio_callback(struct kvm_cpu *vcpu,
				u64 addr, u8 *data, u32 len,
				u8 is_write, void *ptr)
{
	u32 cntx;
	struct plic_state *s = ptr;

	if (len != 4)
		die("plic: invalid len=%d", len);

	/* Word-align and rebase to an offset within the PLIC window */
	addr &= ~0x3;
	addr -= RISCV_IRQCHIP;

	if (addr < ENABLE_BASE) {
		/* Per-source priority registers (PRIORITY_BASE region) */
		if (is_write)
			plic__priority_write(s, addr, data);
		else
			plic__priority_read(s, addr, data);
	} else if (addr < CONTEXT_BASE) {
		/* Per-context enable bitmaps */
		cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART;
		addr -= cntx * ENABLE_PER_HART + ENABLE_BASE;
		if (cntx < s->num_context) {
			if (is_write)
				plic__context_enable_write(s,
							   &s->contexts[cntx],
							   addr, data);
			else
				plic__context_enable_read(s,
							  &s->contexts[cntx],
							  addr, data);
		}
	} else if (addr < REG_SIZE) {
		/* Per-context threshold/claim registers */
		cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART;
		addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE;
		if (cntx < s->num_context) {
			if (is_write)
				plic__context_write(s, &s->contexts[cntx],
						    addr, data);
			else
				plic__context_read(s, &s->contexts[cntx],
						   addr, data);
		}
	}
}
467762224e4SAnup Patel 
plic__generate_fdt_node(void * fdt,struct kvm * kvm)4680dff3501SAnup Patel static void plic__generate_fdt_node(void *fdt, struct kvm *kvm)
4697c9aac00SAnup Patel {
4707c9aac00SAnup Patel 	u32 i;
4710dff3501SAnup Patel 	char name[64];
4727c9aac00SAnup Patel 	u32 reg_cells[4], *irq_cells;
4737c9aac00SAnup Patel 
4747c9aac00SAnup Patel 	reg_cells[0] = 0;
4750dff3501SAnup Patel 	reg_cells[1] = cpu_to_fdt32(RISCV_IRQCHIP);
4767c9aac00SAnup Patel 	reg_cells[2] = 0;
4770dff3501SAnup Patel 	reg_cells[3] = cpu_to_fdt32(RISCV_IRQCHIP_SIZE);
4787c9aac00SAnup Patel 
4797c9aac00SAnup Patel 	irq_cells = calloc(plic.num_context * 2, sizeof(u32));
4807c9aac00SAnup Patel 	if (!irq_cells)
4817c9aac00SAnup Patel 		die("Failed to alloc irq_cells");
4827c9aac00SAnup Patel 
4830dff3501SAnup Patel 	sprintf(name, "interrupt-controller@%08x", (u32)RISCV_IRQCHIP);
4840dff3501SAnup Patel 	_FDT(fdt_begin_node(fdt, name));
4857c9aac00SAnup Patel 	_FDT(fdt_property_string(fdt, "compatible", "riscv,plic0"));
4867c9aac00SAnup Patel 	_FDT(fdt_property(fdt, "reg", reg_cells, sizeof(reg_cells)));
4877c9aac00SAnup Patel 	_FDT(fdt_property_cell(fdt, "#interrupt-cells", 1));
4887c9aac00SAnup Patel 	_FDT(fdt_property(fdt, "interrupt-controller", NULL, 0));
4897c9aac00SAnup Patel 	_FDT(fdt_property_cell(fdt, "riscv,max-priority", plic.max_prio));
4907c9aac00SAnup Patel 	_FDT(fdt_property_cell(fdt, "riscv,ndev", MAX_DEVICES - 1));
4917c9aac00SAnup Patel 	_FDT(fdt_property_cell(fdt, "phandle", PHANDLE_PLIC));
4927c9aac00SAnup Patel 	for (i = 0; i < (plic.num_context / 2); i++) {
4937c9aac00SAnup Patel 		irq_cells[4*i + 0] = cpu_to_fdt32(PHANDLE_CPU_INTC_BASE + i);
4947c9aac00SAnup Patel 		irq_cells[4*i + 1] = cpu_to_fdt32(0xffffffff);
4957c9aac00SAnup Patel 		irq_cells[4*i + 2] = cpu_to_fdt32(PHANDLE_CPU_INTC_BASE + i);
4967c9aac00SAnup Patel 		irq_cells[4*i + 3] = cpu_to_fdt32(9);
4977c9aac00SAnup Patel 	}
4987c9aac00SAnup Patel 	_FDT(fdt_property(fdt, "interrupts-extended", irq_cells,
4997c9aac00SAnup Patel 			  sizeof(u32) * plic.num_context * 2));
5007c9aac00SAnup Patel 	_FDT(fdt_end_node(fdt));
5017c9aac00SAnup Patel 
5027c9aac00SAnup Patel 	free(irq_cells);
5037c9aac00SAnup Patel }
5047c9aac00SAnup Patel 
/*
 * Populate the default 1:1 GSI -> PLIC pin routing table.
 *
 * NOTE(review): next_gsi, irq_routing and irq__allocate_routing_entry()
 * are globals/helpers presumably provided by kvm/irq.h — verify; using
 * the global next_gsi as the loop counter also advances the allocator's
 * notion of the next free GSI to MAX_DEVICES.
 *
 * Returns 0 on success or a negative error from the entry allocator.
 */
static int plic__irq_routing_init(struct kvm *kvm)
{
	int r;

	/*
	 * This describes the default routing that the kernel uses without
	 * any routing explicitly set up via KVM_SET_GSI_ROUTING. So we
	 * don't need to commit these setting right now. The first actual
	 * user (MSI routing) will engage these mappings then.
	 */
	for (next_gsi = 0; next_gsi < MAX_DEVICES; next_gsi++) {
		r = irq__allocate_routing_entry();
		if (r)
			return r;

		irq_routing->entries[irq_routing->nr++] =
			(struct kvm_irq_routing_entry) {
				.gsi = next_gsi,
				.type = KVM_IRQ_ROUTING_IRQCHIP,
				.u.irqchip.irqchip = IRQCHIP_PLIC_NR,
				.u.irqchip.pin = next_gsi,
		};
	}

	return 0;
}
531*f6cc06d6SAnup Patel 
plic__init(struct kvm * kvm)532762224e4SAnup Patel static int plic__init(struct kvm *kvm)
533762224e4SAnup Patel {
534762224e4SAnup Patel 	u32 i;
535762224e4SAnup Patel 	int ret;
536762224e4SAnup Patel 	struct plic_context *c;
537762224e4SAnup Patel 
5380dff3501SAnup Patel 	if (riscv_irqchip != IRQCHIP_PLIC)
5390dff3501SAnup Patel 		return 0;
540762224e4SAnup Patel 
5410dff3501SAnup Patel 	plic.kvm = kvm;
542762224e4SAnup Patel 	plic.num_irq = MAX_DEVICES;
543762224e4SAnup Patel 	plic.num_irq_word = plic.num_irq / 32;
544762224e4SAnup Patel 	if ((plic.num_irq_word * 32) < plic.num_irq)
545762224e4SAnup Patel 		plic.num_irq_word++;
546762224e4SAnup Patel 	plic.max_prio = (1UL << PRIORITY_PER_ID) - 1;
547762224e4SAnup Patel 
548762224e4SAnup Patel 	plic.num_context = kvm->nrcpus * 2;
549762224e4SAnup Patel 	plic.contexts = calloc(plic.num_context, sizeof(struct plic_context));
550762224e4SAnup Patel 	if (!plic.contexts)
551762224e4SAnup Patel 		return -ENOMEM;
552762224e4SAnup Patel 	for (i = 0; i < plic.num_context; i++) {
553762224e4SAnup Patel 		c = &plic.contexts[i];
554762224e4SAnup Patel 		c->s = &plic;
555762224e4SAnup Patel 		c->num = i;
556762224e4SAnup Patel 		c->vcpu = kvm->cpus[i / 2];
557762224e4SAnup Patel 		mutex_init(&c->irq_lock);
558762224e4SAnup Patel 	}
559762224e4SAnup Patel 
560762224e4SAnup Patel 	mutex_init(&plic.irq_lock);
561762224e4SAnup Patel 
5620dff3501SAnup Patel 	ret = kvm__register_mmio(kvm, RISCV_IRQCHIP, RISCV_IRQCHIP_SIZE,
563762224e4SAnup Patel 				 false, plic__mmio_callback, &plic);
564762224e4SAnup Patel 	if (ret)
565762224e4SAnup Patel 		return ret;
566762224e4SAnup Patel 
567*f6cc06d6SAnup Patel 	/* Setup default IRQ routing */
568*f6cc06d6SAnup Patel 	plic__irq_routing_init(kvm);
569*f6cc06d6SAnup Patel 
570762224e4SAnup Patel 	plic.ready = true;
571762224e4SAnup Patel 
572762224e4SAnup Patel 	return 0;
573762224e4SAnup Patel 
574762224e4SAnup Patel }
575762224e4SAnup Patel dev_init(plic__init);
576762224e4SAnup Patel 
plic__exit(struct kvm * kvm)577762224e4SAnup Patel static int plic__exit(struct kvm *kvm)
578762224e4SAnup Patel {
5790dff3501SAnup Patel 	if (riscv_irqchip != IRQCHIP_PLIC)
5800dff3501SAnup Patel 		return 0;
5810dff3501SAnup Patel 
582762224e4SAnup Patel 	plic.ready = false;
5830dff3501SAnup Patel 	kvm__deregister_mmio(kvm, RISCV_IRQCHIP);
584762224e4SAnup Patel 	free(plic.contexts);
585762224e4SAnup Patel 
586762224e4SAnup Patel 	return 0;
587762224e4SAnup Patel }
588762224e4SAnup Patel dev_exit(plic__exit);
5890dff3501SAnup Patel 
/*
 * Select the user-space PLIC as the platform irqchip and install its
 * callbacks, unless another irqchip model was already chosen.
 */
void plic__create(struct kvm *kvm)
{
	if (riscv_irqchip != IRQCHIP_UNKNOWN)
		return;

	riscv_irqchip = IRQCHIP_PLIC;
	/* Emulated entirely in user space, not via an in-kernel irqchip */
	riscv_irqchip_inkernel = false;
	riscv_irqchip_trigger = plic__irq_trig;
	riscv_irqchip_generate_fdt_node = plic__generate_fdt_node;
	riscv_irqchip_phandle = PHANDLE_PLIC;
	/* No MSI phandle published — presumably no MSI support in this model */
	riscv_irqchip_msi_phandle = PHANDLE_RESERVED;
	riscv_irqchip_line_sensing = false;
}
603