/*
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Borrowed heavily from QEMU's xics.c,
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Modifications copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "spapr.h"
#include "xics.h"
#include "kvm/util.h"
#include "kvm/kvm.h"

#include <stdio.h>
#include <stdlib.h>

#define XICS_NUM_IRQS	1024


/* #define DEBUG_XICS yes */
#ifdef DEBUG_XICS
#define xics_dprintf(fmt, ...)	\
	do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define xics_dprintf(fmt, ...)	\
	do { } while (0)
#endif

/*
 * ICP: Presentation layer
 */

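/*
 * Per-server (per-vcpu) presentation state: xirr holds the CPPR in its
 * top byte and the XISR (the source number being presented) in its low
 * 24 bits; mfrr is the Most Favoured Request Register used for IPIs,
 * where 0xff means no IPI is pending.
 */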
struct icp_server_state {
	uint32_t xirr;
	uint8_t pending_priority;
	uint8_t mfrr;
	struct kvm_cpu *cpu;
};

#define XICS_IRQ_OFFSET	KVM_IRQ_OFFSET
#define XISR_MASK	0x00ffffff
#define CPPR_MASK	0xff000000

#define XISR(ss)	(((ss)->xirr) & XISR_MASK)
#define CPPR(ss)	(((ss)->xirr) >> 24)
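
/*
 * XIRR layout, as encoded by the masks above:
 *
 *    31        24 23                      0
 *   +------------+-------------------------+
 *   |    CPPR    |          XISR           |
 *   +------------+-------------------------+
 *
 * CPPR is the current processor priority (0x00 most favoured, 0xff
 * least favoured); XISR is the source number of the interrupt being
 * presented, or 0 if none.
 */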

struct ics_state;

struct icp_state {
	unsigned long nr_servers;
	struct icp_server_state *ss;
	struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);

static inline void cpu_irq_raise(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT1[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1);
}

static inline void cpu_irq_lower(struct kvm_cpu *vcpu)
{
	xics_dprintf("INT0[%p]\n", vcpu);
	kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 0);
}

/*
 * Present a pending IPI if it is more favoured than whatever the server
 * is currently handling; a displaced external interrupt is bounced back
 * to the ICS for a later resend.
 */
static void icp_check_ipi(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
		return;
	}

	if (XISR(ss)) {
		ics_reject(icp->ics, XISR(ss));
	}

	ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
	ss->pending_priority = ss->mfrr;
	cpu_irq_raise(ss->cpu);
}

static void icp_resend(struct icp_state *icp, int server)
{
	struct icp_server_state *ss = icp->ss + server;

	if (ss->mfrr < CPPR(ss)) {
		icp_check_ipi(icp, server);
	}
	ics_resend(icp->ics);
}

/*
 * Change the server's CPPR. Raising the priority (numerically lowering
 * cppr) past a pending interrupt bounces that interrupt back to the
 * ICS; lowering the priority may let previously rejected interrupts
 * through, so ask for a resend.
 */
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
	struct icp_server_state *ss = icp->ss + server;
	uint8_t old_cppr;
	uint32_t old_xisr;

	old_cppr = CPPR(ss);
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

	if (cppr < old_cppr) {
		if (XISR(ss) && (cppr <= ss->pending_priority)) {
			old_xisr = XISR(ss);
			ss->xirr &= ~XISR_MASK;	/* Clear XISR */
			cpu_irq_lower(ss->cpu);
			ics_reject(icp->ics, old_xisr);
		}
	} else {
		if (!XISR(ss)) {
			icp_resend(icp, server);
		}
	}
}

static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
{
	struct icp_server_state *ss = icp->ss + nr;

	ss->mfrr = mfrr;
	if (mfrr < CPPR(ss)) {
		icp_check_ipi(icp, nr);
	}
}

/* Accept the presented interrupt: return XIRR and raise CPPR to its priority. */
static uint32_t icp_accept(struct icp_server_state *ss)
{
	uint32_t xirr;

	cpu_irq_lower(ss->cpu);
	xirr = ss->xirr;
	ss->xirr = ss->pending_priority << 24;
	return xirr;
}

static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
	struct icp_server_state *ss = icp->ss + server;

	/* Send EOI -> ICS */
	ics_eoi(icp->ics, xirr & XISR_MASK);
	ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
	if (!XISR(ss)) {
		icp_resend(icp, server);
	}
}

/* Deliver interrupt nr at 'priority' to 'server', or reject it back to the ICS. */
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
	struct icp_server_state *ss = icp->ss + server;

	xics_dprintf("icp_irq(nr %d, server %d, prio 0x%x)\n", nr, server, priority);
	if ((priority >= CPPR(ss))
	    || (XISR(ss) && (ss->pending_priority <= priority))) {
		xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
			     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
		ics_reject(icp->ics, nr);
	} else {
		if (XISR(ss)) {
			xics_dprintf("reject %d, CPPR 0x%x, XISR 0x%x, pprio 0x%x, prio 0x%x\n",
				     nr, CPPR(ss), XISR(ss), ss->pending_priority, priority);
			ics_reject(icp->ics, XISR(ss));
		}
		ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
		ss->pending_priority = priority;
		cpu_irq_raise(ss->cpu);
	}
}

/*
 * ICS: Source layer
 */

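/*
 * Per-source state. A source whose priority is 0xff is masked;
 * deliveries arriving while masked are latched in masked_pending and
 * replayed when the source is unmasked via ics_write_xive_msi().
 * rejected marks an interrupt the ICP bounced back for a later resend.
 */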
struct ics_irq_state {
	int server;
	uint8_t priority;
	uint8_t saved_priority;
	unsigned int rejected:1;
	unsigned int masked_pending:1;
};

struct ics_state {
	unsigned int nr_irqs;
	unsigned int offset;
	struct ics_irq_state *irqs;
	struct icp_state *icp;
};
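
/*
 * Global interrupt numbers are offset by 'offset' (XICS_IRQ_OFFSET):
 * global number nr maps to irqs[nr - offset], and source index srcno
 * maps back to global number srcno + offset, as the helpers below
 * assume.
 */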

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
	return (nr >= ics->offset)
		&& (nr < (ics->offset + ics->nr_irqs));
}

static void ics_set_irq_msi(struct ics_state *ics, int srcno, int val)
{
	struct ics_irq_state *irq = ics->irqs + srcno;

	if (val) {
		if (irq->priority == 0xff) {
			xics_dprintf(" irq pri ff, masked pending\n");
			irq->masked_pending = 1;
		} else {
			icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
		}
	}
}

static void ics_reject_msi(struct ics_state *ics, int nr)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->rejected = 1;
}

static void ics_resend_msi(struct ics_state *ics)
{
	unsigned int i;

	for (i = 0; i < ics->nr_irqs; i++) {
		struct ics_irq_state *irq = ics->irqs + i;

		/* FIXME: filter by server#? */
		if (irq->rejected) {
			irq->rejected = 0;
			if (irq->priority != 0xff) {
				icp_irq(ics->icp, irq->server, i + ics->offset, irq->priority);
			}
		}
	}
}

static void ics_write_xive_msi(struct ics_state *ics, int nr, int server,
			       uint8_t priority)
{
	struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

	irq->server = server;
	irq->priority = priority;
	xics_dprintf("ics_write_xive_msi(nr %d, server %d, pri 0x%x)\n", nr, server, priority);

	if (!irq->masked_pending || (priority == 0xff)) {
		return;
	}

	irq->masked_pending = 0;
	icp_irq(ics->icp, server, nr, priority);
}

static void ics_reject(struct ics_state *ics, int nr)
{
	ics_reject_msi(ics, nr);
}

static void ics_resend(struct ics_state *ics)
{
	ics_resend_msi(ics);
}

static void ics_eoi(struct ics_state *ics, int nr)
{
	/* Nothing to do for MSI-style (edge) sources. */
}

/*
 * Exported functions
 */
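
/*
 * The guest drives the ICP through the H_CPPR, H_IPI, H_XIRR and H_EOI
 * hypercalls, and configures ICS sources through the ibm,set-xive,
 * ibm,get-xive, ibm,int-off and ibm,int-on RTAS calls registered in
 * xics_init() below.
 */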

static target_ulong h_cppr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	target_ulong cppr = args[0];

	xics_dprintf("h_cppr(%lx)\n", cppr);
	icp_set_cppr(vcpu->kvm->arch.icp, vcpu->cpu_id, cppr);
	return H_SUCCESS;
}

static target_ulong h_ipi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong server = args[0];
	target_ulong mfrr = args[1];

	xics_dprintf("h_ipi(%lx, %lx)\n", server, mfrr);
	if (server >= vcpu->kvm->arch.icp->nr_servers) {
		return H_PARAMETER;
	}

	icp_set_mfrr(vcpu->kvm->arch.icp, server, mfrr);
	return H_SUCCESS;
}

static target_ulong h_xirr(struct kvm_cpu *vcpu,
			   target_ulong opcode, target_ulong *args)
{
	uint32_t xirr = icp_accept(vcpu->kvm->arch.icp->ss + vcpu->cpu_id);

	xics_dprintf("h_xirr() = %x\n", xirr);
	args[0] = xirr;
	return H_SUCCESS;
}

static target_ulong h_eoi(struct kvm_cpu *vcpu,
			  target_ulong opcode, target_ulong *args)
{
	target_ulong xirr = args[0];

	xics_dprintf("h_eoi(%lx)\n", xirr);
	icp_eoi(vcpu->kvm->arch.icp, vcpu->cpu_id, xirr);
	return H_SUCCESS;
}
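
/*
 * A typical external-interrupt cycle, sketched as hypothetical
 * guest-side pseudo-code (not code in this file):
 *
 *	xirr = h_xirr();	accept: returns XISR|CPPR, raises CPPR
 *	...service source (xirr & XISR_MASK)...
 *	h_eoi(xirr);		restore CPPR and EOI the source
 */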

static void rtas_set_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr, server, priority;

	if ((nargs != 3) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);
	server = rtas_ld(vcpu->kvm, args, 1);
	priority = rtas_ld(vcpu->kvm, args, 2);

	xics_dprintf("rtas_set_xive(%x,%x,%x)\n", nr, server, priority);
	if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
	    || (priority > 0xff)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	ics_write_xive_msi(ics, nr, server, priority);

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_get_xive(struct kvm_cpu *vcpu, uint32_t token,
			  uint32_t nargs, target_ulong args,
			  uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 3)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
	rtas_st(vcpu->kvm, rets, 1, ics->irqs[nr - ics->offset].server);
	rtas_st(vcpu->kvm, rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(struct kvm_cpu *vcpu, uint32_t token,
			 uint32_t nargs, target_ulong args,
			 uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0. Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static void rtas_int_on(struct kvm_cpu *vcpu, uint32_t token,
			uint32_t nargs, target_ulong args,
			uint32_t nret, target_ulong rets)
{
	struct ics_state *ics = vcpu->kvm->arch.icp->ics;
	uint32_t nr;

	if ((nargs != 1) || (nret != 1)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	nr = rtas_ld(vcpu->kvm, args, 0);

	if (!ics_valid_irq(ics, nr)) {
		rtas_st(vcpu->kvm, rets, 0, -3);
		return;
	}

	/* ME: QEMU wrote xive_msi here, in #if 0. Deleted. */

	rtas_st(vcpu->kvm, rets, 0, 0); /* Success */
}

static int xics_init(struct kvm *kvm)
{
	unsigned int i;
	struct icp_state *icp;
	struct ics_state *ics;
	int j;

	icp = malloc(sizeof(*icp));
	if (!icp)
		die("Out of memory allocating ICP");
	icp->nr_servers = kvm->nrcpus;
	icp->ss = malloc(icp->nr_servers * sizeof(struct icp_server_state));
	if (!icp->ss)
		die("Out of memory allocating ICP server states");

	for (i = 0; i < icp->nr_servers; i++) {
		icp->ss[i].xirr = 0;
		icp->ss[i].pending_priority = 0;
		icp->ss[i].cpu = NULL;
		icp->ss[i].mfrr = 0xff;
	}

	/* icp->ss[cpu_id].cpu is hooked up for each vcpu below. */

	ics = malloc(sizeof(*ics));
	if (!ics)
		die("Out of memory allocating ICS");
	ics->nr_irqs = XICS_NUM_IRQS;
	ics->offset = XICS_IRQ_OFFSET;
	ics->irqs = malloc(ics->nr_irqs * sizeof(struct ics_irq_state));
	if (!ics->irqs)
		die("Out of memory allocating ICS irq states");

	icp->ics = ics;
	ics->icp = icp;

	for (i = 0; i < ics->nr_irqs; i++) {
		ics->irqs[i].server = 0;
		ics->irqs[i].priority = 0xff;	/* Masked until the guest sets a XIVE */
		ics->irqs[i].saved_priority = 0xff;
		ics->irqs[i].rejected = 0;
		ics->irqs[i].masked_pending = 0;
	}

	spapr_register_hypercall(H_CPPR, h_cppr);
	spapr_register_hypercall(H_IPI, h_ipi);
	spapr_register_hypercall(H_XIRR, h_xirr);
	spapr_register_hypercall(H_EOI, h_eoi);

	spapr_rtas_register("ibm,set-xive", rtas_set_xive);
	spapr_rtas_register("ibm,get-xive", rtas_get_xive);
	spapr_rtas_register("ibm,int-off", rtas_int_off);
	spapr_rtas_register("ibm,int-on", rtas_int_on);

	for (j = 0; j < kvm->nrcpus; j++) {
		struct kvm_cpu *vcpu = kvm->cpus[j];

		if (vcpu->cpu_id >= icp->nr_servers)
			die("Invalid server number for cpuid %ld\n", vcpu->cpu_id);

		icp->ss[vcpu->cpu_id].cpu = vcpu;
	}

	kvm->arch.icp = icp;

	return 0;
}
dev_base_init(xics_init);


void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
	/*
	 * Route event to ICS, which routes to ICP, which eventually does a
	 * kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1)
	 */
	xics_dprintf("Raising IRQ %d -> %d\n", irq, level);
	ics_set_irq_msi(kvm->arch.icp->ics, irq - kvm->arch.icp->ics->offset, level);
}
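
/*
 * Example (hypothetical device-model caller): pulse a source's level.
 * ics_set_irq_msi() treats sources as edge/MSI style, so only the
 * level=1 call actually presents an interrupt; level=0 is a no-op.
 *
 *	kvm__irq_line(kvm, virtio_irq, 1);
 *	kvm__irq_line(kvm, virtio_irq, 0);
 */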