/* kvmtool powerpc/xics.c (revision 42ac24f9e8b502767c1882b7ddbe57e4ec9f03fd) */
/*
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Borrowed heavily from QEMU's xics.c,
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Modifications copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "spapr.h"
#include "xics.h"
#include "kvm/util.h"

#include <stdio.h>
#include <malloc.h>


/* #define DEBUG_XICS yes */
#ifdef DEBUG_XICS
#define xics_dprintf(fmt, ...)					\
	do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define xics_dprintf(fmt, ...)			\
	do { } while (0)
#endif

/*
 * ICP: Presentation layer
 */

struct icp_server_state {
	uint32_t xirr;
	uint8_t pending_priority;
	uint8_t mfrr;
	struct kvm_cpu *cpu;
};

#define XICS_IRQ_OFFSET 16
#define XISR_MASK	0x00ffffff
#define CPPR_MASK	0xff000000

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)
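
/*
 * XIRR layout (see the masks above): the most-significant byte holds the
 * CPPR (current processor priority), and the low 24 bits hold the XISR,
 * i.e. the source number of the interrupt currently being presented.
 * External interrupt sources start at XICS_IRQ_OFFSET; the XICS_IPI source
 * number (defined in a header, not here) is used for inter-processor
 * interrupts, see icp_check_ipi() below.
 */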

struct ics_state;

struct icp_state {
	unsigned long nr_servers;
	struct icp_server_state *ss;
	struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);

static inline void cpu_irq_raise(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT1[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1);
}

static inline void cpu_irq_lower(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT0[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 0);
}

static void icp_check_ipi(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
		return;
	}

	if (XISR(ss)) {
		ics_reject(icp->ics, XISR(ss));
	}

	ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
	ss->pending_priority = ss->mfrr;
	cpu_irq_raise(ss->cpu);
}

static void icp_resend(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (ss->mfrr < CPPR(ss)) {
		icp_check_ipi(icp, server);
	}
	ics_resend(icp->ics);
}

static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
	struct icp_server_state *ss = icp->ss + server;
	uint8_t old_cppr;
	uint32_t old_xisr;

	old_cppr = CPPR(ss);
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

	if (cppr < old_cppr) {
		if (XISR(ss) && (cppr <= ss->pending_priority)) {
			old_xisr = XISR(ss);
			ss->xirr &= ~XISR_MASK; /* Clear XISR */
			cpu_irq_lower(ss->cpu);
			ics_reject(icp->ics, old_xisr);
		}
	} else {
		if (!XISR(ss)) {
			icp_resend(icp, server);
		}
	}
}

static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
{
	struct icp_server_state *ss = icp->ss + nr;

	ss->mfrr = mfrr;
	if (mfrr < CPPR(ss)) {
		icp_check_ipi(icp, nr);
	}
}

static uint32_t icp_accept(struct icp_server_state *ss)
{
	uint32_t xirr;

	cpu_irq_lower(ss->cpu);
	xirr = ss->xirr;
	ss->xirr = ss->pending_priority << 24;
	return xirr;
}

static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
	struct icp_server_state *ss = icp->ss + server;

	/* Send EOI -> ICS */
	ics_eoi(icp->ics, xirr & XISR_MASK);
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
	if (!XISR(ss)) {
		icp_resend(icp, server);
	}
}

static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
	struct icp_server_state *ss = icp->ss + server;
	xics_dprintf("icp_irq(nr %d, server %d, prio 0x%x)\n", nr, server, priority);
	if ((priority >= CPPR(ss))
	    || (XISR(ss) && (ss->pending_priority <= priority))) {
		xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
			     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
		ics_reject(icp->ics, nr);
	} else {
		if (XISR(ss)) {
			xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
				     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
			ics_reject(icp->ics, XISR(ss));
		}
		ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
		ss->pending_priority = priority;
		cpu_irq_raise(ss->cpu);
	}
}
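
/*
 * Delivery path for a device interrupt, as implemented in this file:
 * kvm__irq_line() -> ics_set_irq_msi() -> icp_irq() -> cpu_irq_raise().
 * The guest then fetches the source with H_XIRR (icp_accept()) and
 * finishes with H_EOI (icp_eoi()); rejected or masked-pending sources
 * get another chance via icp_resend()/ics_resend().
 */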

/*
 * ICS: Source layer
 */

struct ics_irq_state {
	int server;
	uint8_t priority;
	uint8_t saved_priority;
	int rejected:1;
	int masked_pending:1;
};

struct ics_state {
	unsigned int nr_irqs;
	unsigned int offset;
	struct ics_irq_state *irqs;
	struct icp_state *icp;
};

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
	return (nr >= ics->offset)
		&& (nr < (ics->offset + ics->nr_irqs));
}

static void ics_set_irq_msi(struct ics_state *ics, int srcno, int val)
{
	struct ics_irq_state *irq = ics->irqs + srcno;

	if (val) {
		if (irq->priority == 0xff) {
			xics_dprintf(" irq pri ff, masked pending\n");
			irq->masked_pending = 1;
		} else {
			icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
		}
	}
}

static void ics_reject_msi(struct ics_state *ics, int nr)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->rejected = 1;
}

static void ics_resend_msi(struct ics_state *ics)
{
	unsigned int i;

	for (i = 0; i < ics->nr_irqs; i++) {
		struct ics_irq_state *irq = ics->irqs + i;

		/* FIXME: filter by server#? */
		if (irq->rejected) {
			irq->rejected = 0;
			if (irq->priority != 0xff) {
				icp_irq(ics->icp, irq->server, i + ics->offset, irq->priority);
			}
		}
	}
}

static void ics_write_xive_msi(struct ics_state *ics, int nr, int server,
			       uint8_t priority)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->server = server;
	irq->priority = priority;
	xics_dprintf("ics_write_xive_msi(nr %d, server %d, pri 0x%x)\n", nr, server, priority);

	if (!irq->masked_pending || (priority == 0xff)) {
		return;
	}

	irq->masked_pending = 0;
	icp_irq(ics->icp, server, nr, priority);
}

static void ics_reject(struct ics_state *ics, int nr)
{
	ics_reject_msi(ics, nr);
}

static void ics_resend(struct ics_state *ics)
{
	ics_resend_msi(ics);
}

static void ics_eoi(struct ics_state *ics, int nr)
{
}

/*
 * Exported functions
 */

static int allocated_irqnum = XICS_IRQ_OFFSET;

/*
 * xics_alloc_irqnum(): This is hacky.  The problem boils down to the PCI
 * device code, which calls kvm__irq_line( .. pcidev->pci_hdr.irq_line ..)
 * at will.  Each PCI device's IRQ line is allocated by
 * irq__register_device() (which allocates an IRQ AND a PCI device number).
 *
 * In future I'd like to at least mimic some kind of 'upstream IRQ
 * controller' whereby PCI devices let their PHB know when they want to
 * raise an interrupt, and that percolates up.
 *
 * For now, allocate a REAL xics irq number and (via irq__register_device)
 * push that into the config space.  Note that irq_line is an 8-bit field,
 * so only IRQ numbers up to 255 can be handed out this way.
 */
int xics_alloc_irqnum(void)
{
	int irq = allocated_irqnum++;

	if (irq > 255)
		die("Huge numbers of IRQs aren't supported with the daft kvmtool IRQ system.");

	return irq;
}
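
/*
 * Illustrative sketch only (not called from anywhere, kept compiled out):
 * roughly how a device-setup path of the kind described above might
 * consume xics_alloc_irqnum() and later pulse the line through
 * kvm__irq_line().  The pci_device_header/irq_line names are assumptions
 * borrowed from the comment above, not guaranteed to match the headers.
 */
#if 0
static void example_wire_pci_irq(struct kvm *kvm, struct pci_device_header *pci_hdr)
{
	int irq = xics_alloc_irqnum();	/* real xics source number, fits in 8 bits */

	pci_hdr->irq_line = irq;	/* what the guest reads from config space */

	/* Later, when the device wants to interrupt the guest: */
	kvm__irq_line(kvm, irq, 1);
	kvm__irq_line(kvm, irq, 0);
}
#endif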

static target_ulong h_cppr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	target_ulong cppr = args[0];

	xics_dprintf("h_cppr(%lx)\n", cppr);
	icp_set_cppr(vcpu->kvm->arch.icp, vcpu->cpu_id, cppr);
	return H_SUCCESS;
}

static target_ulong h_ipi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong server = args[0];
	target_ulong mfrr = args[1];

	xics_dprintf("h_ipi(%lx, %lx)\n", server, mfrr);
	if (server >= vcpu->kvm->arch.icp->nr_servers) {
		return H_PARAMETER;
	}

	icp_set_mfrr(vcpu->kvm->arch.icp, server, mfrr);
	return H_SUCCESS;
}

static target_ulong h_xirr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	uint32_t xirr = icp_accept(vcpu->kvm->arch.icp->ss + vcpu->cpu_id);

	xics_dprintf("h_xirr() = %x\n", xirr);
	args[0] = xirr;
	return H_SUCCESS;
}

static target_ulong h_eoi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong xirr = args[0];

	xics_dprintf("h_eoi(%lx)\n", xirr);
	icp_eoi(vcpu->kvm->arch.icp, vcpu->cpu_id, xirr);
	return H_SUCCESS;
}
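
/*
 * Guest-side interrupt cycle using the hcalls above: H_CPPR sets the
 * processor priority, H_XIRR accepts the pending interrupt (returning the
 * XIRR and raising the CPPR to the pending priority, see icp_accept()),
 * H_EOI ends the interrupt and restores the CPPR from the value passed
 * back in, and H_IPI writes another server's MFRR to send it an IPI.
 */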

static void rtas_set_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr, server, priority;

	if ((nargs != 3) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);
	server = rtas_ld(vcpu->kvm, args, 1);
	priority = rtas_ld(vcpu->kvm, args, 2);

	xics_dprintf("rtas_set_xive(%x,%x,%x)\n", nr, server, priority);
	if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
	    || (priority > 0xff)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	ics_write_xive_msi(ics, nr, server, priority);

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_get_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 3)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
	rtas_st(vcpu->kvm, rets, 1, ics->irqs[nr - ics->offset].server);
	rtas_st(vcpu->kvm, rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(struct kvm_cpu *vcpu, uint32_t token,
			 uint32_t nargs, target_ulong args,
			 uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0.  Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_int_on(struct kvm_cpu *vcpu, uint32_t token,
			uint32_t nargs, target_ulong args,
			uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0.  Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}
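
/*
 * The RTAS handlers above all follow the same convention: rets[0] is the
 * status word, 0 for success and -3 for a parameter error; ibm,get-xive
 * additionally returns the server in rets[1] and the priority in rets[2].
 */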

void xics_cpu_register(struct kvm_cpu *vcpu)
{
	if (vcpu->cpu_id < vcpu->kvm->arch.icp->nr_servers)
		vcpu->kvm->arch.icp->ss[vcpu->cpu_id].cpu = vcpu;
	else
		die("Setting invalid server for cpuid %ld\n", vcpu->cpu_id);
}

struct icp_state *xics_system_init(unsigned int nr_irqs, unsigned int nr_cpus)
{
	int max_server_num;
	unsigned int i;
	struct icp_state *icp;
	struct ics_state *ics;

	max_server_num = nr_cpus;

	icp = malloc(sizeof(*icp));
	icp->nr_servers = max_server_num + 1;
	icp->ss = malloc(icp->nr_servers*sizeof(struct icp_server_state));

	for (i = 0; i < icp->nr_servers; i++) {
		icp->ss[i].xirr = 0;
		icp->ss[i].pending_priority = 0;
		icp->ss[i].cpu = 0;
		icp->ss[i].mfrr = 0xff;
	}

	/*
	 * icp->ss[cpu_id].cpu is set by each CPU calling in to
	 * xics_cpu_register().
	 */

	ics = malloc(sizeof(*ics));
	ics->nr_irqs = nr_irqs;
	ics->offset = XICS_IRQ_OFFSET;
	ics->irqs = malloc(nr_irqs * sizeof(struct ics_irq_state));

	icp->ics = ics;
	ics->icp = icp;

	for (i = 0; i < nr_irqs; i++) {
		ics->irqs[i].server = 0;
		ics->irqs[i].priority = 0xff;
		ics->irqs[i].saved_priority = 0xff;
		ics->irqs[i].rejected = 0;
		ics->irqs[i].masked_pending = 0;
	}

	spapr_register_hypercall(H_CPPR, h_cppr);
	spapr_register_hypercall(H_IPI, h_ipi);
	spapr_register_hypercall(H_XIRR, h_xirr);
	spapr_register_hypercall(H_EOI, h_eoi);

	spapr_rtas_register("ibm,set-xive", rtas_set_xive);
	spapr_rtas_register("ibm,get-xive", rtas_get_xive);
	spapr_rtas_register("ibm,int-off", rtas_int_off);
	spapr_rtas_register("ibm,int-on", rtas_int_on);

	return icp;
}
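
/*
 * Illustrative sketch only (not part of this file, kept compiled out): the
 * bring-up order implied by the code above.  The machine setup creates the
 * ICP/ICS once, and each vcpu then registers itself as an interrupt
 * server.  The vcpus array parameter is an assumption for illustration.
 */
#if 0
static void example_xics_bringup(struct kvm *kvm, struct kvm_cpu **vcpus,
				 unsigned int nr_cpus, unsigned int nr_irqs)
{
	unsigned int i;

	kvm->arch.icp = xics_system_init(nr_irqs, nr_cpus);

	for (i = 0; i < nr_cpus; i++)
		xics_cpu_register(vcpus[i]);	/* fills icp->ss[cpu_id].cpu */
}
#endif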

void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	/*
	 * Route event to ICS, which routes to ICP, which eventually does a
	 * kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1)
	 */
	xics_dprintf("Setting IRQ %d -> %d\n", irq, level);
	ics_set_irq_msi(kvm->arch.icp->ics, irq - kvm->arch.icp->ics->offset, level);
}