/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/reset.h"
#include "migration/vmstate.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "trace.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

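/*
 * Illustration (not from the original source, assuming the usual
 * two-page 64K ESB setting, i.e. esb_shift = 17): the guest physical
 * layout built by spapr_xive_realize() below would be:
 *
 *   vc_base                        source ESB pages, 128K per IRQ
 *   vc_base + source ESB length    END ESB pages ("end_base")
 *   tm_base                        TIMA, 4 x 64K pages (HW/HV/OS/User)
 */
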
/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400

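/*
 * Worked example (illustrative, with SPAPR_XIVE_BLOCK_ID being 0):
 * vCPU 3 is assigned NVT 0/0x403, and the mapping is trivially
 * reversible:
 *
 *   spapr_xive_cpu_to_nvt(cpu3, &blk, &idx)  -> blk = 0, idx = 0x403
 *   spapr_xive_nvt_to_target(blk, idx)       -> 0x403 - 0x400 = 3
 */
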
/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

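/*
 * Worked example (illustrative): vCPU 2 at priority 5 uses END index
 * (2 << 3) + 5 = 0x15, and the reverse split in
 * spapr_xive_end_to_target() gives server = 0x15 >> 3 = 2 and
 * prio = 0x15 & 0x7 = 5.
 */
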
static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}

/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          GString *buf)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    g_string_append_printf(buf, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                           spapr_xive_nvt_to_target(0, nvt),
                           priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, buf);
}

/*
 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
 */
#define spapr_xive_in_kernel(xive) \
    (kvm_irqchip_in_kernel() && (xive)->fd != -1)

static void spapr_xive_pic_print_info(SpaprXive *xive, GString *buf)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    g_string_append_printf(buf, "  LISN         PQ    EISN     CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        g_string_append_printf(buf, "  %08x %s %c%c%c %s %08x ", i,
                               xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                               pq & XIVE_ESB_VAL_P ? 'P' : '-',
                               pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                               xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
                               xive_eas_is_masked(eas) ? "M" : " ",
                               (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, buf);
            }
        }
        g_string_append_c(buf, '\n');
    }
}

void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

static void spapr_xive_tm_write(void *opaque, hwaddr offset,
                                uint64_t value, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

const MemoryRegionOps spapr_xive_tm_ops = {
    .read = spapr_xive_tm_read,
    .write = spapr_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /* Set by spapr_irq_init() */
    g_assert(xive->nr_irqs);
    g_assert(xive->nr_ends);

    sxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
                          xive, "xive.tima", 4ull << TM_SHIFT);

    /*
     * Map all regions. These will be enabled or disabled at reset and
     * can also be overridden by KVM memory regions if active
     */
    memory_region_add_subregion(get_system_memory(), xive->vc_base,
                                &xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->end_base,
                                &end_xsrc->esb_mmio);
    memory_region_add_subregion(get_system_memory(), xive->tm_base,
                                &xive->tm_mmio);
}

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find the NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT. It should never
     * get called.
     */
    g_assert_not_reached();
}

static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                                uint8_t nvt_blk, uint32_t nvt_idx,
                                bool crowd, bool cam_ignore,
                                uint8_t priority,
                                uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;
    int count = 0;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * Check the thread context CAM lines and record matches.
         */
        ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the matching thread interrupt context and follow on to
         * check for duplicates which are invalid.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return -1;
            }

            match->ring = ring;
            match->tctx = tctx;
            count++;
        }
    }

    return count;
}

static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /*
     * Let's claim GEN1 TIMA format. If running with KVM on P10, the
     * correct answer is deep in the hardware and not accessible to
     * us.  But it shouldn't matter as it only affects the presenter
     * as seen by a guest OS.
     */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
{
    return SPAPR_XIVE_BLOCK_ID;
}

static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_get(&xive->source, idx);
    return 0;
}

static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                             uint8_t *pq)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    assert(SPAPR_XIVE_BLOCK_ID == blk);

    *pq = xive_source_esb_set(&xive->source, idx, *pq);
    return 0;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    SpaprXive *xive = SPAPR_XIVE(opaque);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_pre_save(xive);
    }

    return 0;
}

/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_post_load(xive, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
                                bool lsi, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_claim_irq(lisn, lsi);

    if (xive_eas_is_valid(&xive->eat[lisn])) {
        error_setg(errp, "IRQ %d is not free", lisn);
        return -EBUSY;
    }

    /*
     * Set default values when allocating an IRQ number
     */
    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
    }

    return 0;
}

static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    assert(lisn < xive->nr_irqs);

    trace_spapr_xive_free_irq(lisn);

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
}

static const Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
};

static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
    if (!obj) {
        return -1;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);
    return 0;
}

static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
{
    uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
}

static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
                                      PowerPCCPU *cpu)
{
    XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
    uint8_t  nvt_blk;
    uint32_t nvt_idx;

    xive_tctx_reset(tctx);

    /*
     * When a Virtual Processor is scheduled to run on a HW thread,
     * the hypervisor pushes its identifier in the OS CAM line.
     * Emulate the same behavior under QEMU.
     */
    spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);

    xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
}

static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
                                        PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    xive_tctx_destroy(spapr_cpu->tctx);
    spapr_cpu->tctx = NULL;
}

static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    trace_spapr_xive_set_irq(irq, val);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_source_set_irq(&xive->source, irq, val);
    } else {
        xive_source_set_irq(&xive->source, irq, val);
    }
}

static void spapr_xive_print_info(SpaprInterruptController *intc, GString *buf)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, buf);
    }
    spapr_xive_pic_print_info(xive, buf);
}

static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
                          void *fdt, uint32_t phandle)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(SPAPR_IRQ_IPI),
        cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * QEMU/KVM only needs to define a single range to reserve the
     * escalation priority. A priority bitmask would have been more
     * appropriate.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(xive->hv_prio),        /* start */
        cpu_to_be32(0xff - xive->hv_prio), /* count */
    };

    /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}

static int spapr_xive_activate(SpaprInterruptController *intc,
                               uint32_t nr_servers, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    if (kvm_enabled()) {
        int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
                                    errp);
        if (rc < 0) {
            return rc;
        }
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(xive, true);

    return 0;
}

static void spapr_xive_deactivate(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);

    spapr_xive_mmio_set_enabled(xive, false);

    if (spapr_xive_in_kernel(xive)) {
        kvmppc_xive_disconnect(intc);
    }
}

static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
{
    return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
}

static void spapr_xive_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    device_class_set_props(dc, spapr_xive_properties);
    device_class_set_parent_realize(dc, spapr_xive_realize,
                                    &sxc->parent_realize);
    dc->vmsd    = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_pq  = spapr_xive_get_pq;
    xrc->set_pq  = spapr_xive_set_pq;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_block_id = spapr_xive_get_block_id;

    sicc->activate = spapr_xive_activate;
    sicc->deactivate = spapr_xive_deactivate;
    sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
    sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
    sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
    sicc->claim_irq = spapr_xive_claim_irq;
    sicc->free_irq = spapr_xive_free_irq;
    sicc->set_irq = spapr_xive_set_irq;
    sicc->print_info = spapr_xive_print_info;
    sicc->dt = spapr_xive_dt;
    sicc->post_load = spapr_xive_post_load;

    xpc->match_nvt  = spapr_xive_match_nvt;
    xpc->get_config = spapr_xive_presenter_get_config;
    xpc->in_kernel  = spapr_xive_in_kernel_xptr;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
    .class_size = sizeof(SpaprXiveClass),
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_SPAPR_INTC },
        { }
    },
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in the
 *          machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * On POWER9, the KVM XIVE device uses priority 7 for the escalation
 * interrupts. So we only allow the guest to use priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
{
    return priority >= xive->hv_prio;
}

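/*
 * Worked example (illustrative): with the default "hv-prio" of 7,
 * guest priorities 0..6 are accepted and any priority >= 7 is
 * rejected as reserved. This matches the { start = 7,
 * count = 0xff - 7 } range advertised by spapr_xive_dt() in the
 * "ibm,plat-res-int-priorities" property.
 */
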
/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB     PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI           PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER       PPC_BIT(62) /* Trigger and management
                                                    on same page */
#define SPAPR_XIVE_SRC_STORE_EOI     PPC_BIT(63) /* Store EOI support */

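/*
 * Illustration of the returned values (not from PAPR): for an
 * emulated LSI, h_int_get_source_info() below sets
 * SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI in R4 and returns -1
 * in R5, forcing the guest through the H_INT_ESB hcall. For an MSI
 * with two-page ESBs, R5 is the management page, R6 the trigger page
 * and R7 is esb_shift - 1 (e.g. 16, assuming esb_shift is 17 for two
 * 64K pages per interrupt).
 */
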
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];

    trace_spapr_xive_get_source_info(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters.  Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *       control structure. An interrupt masked by this mechanism will
 *       be dropped, but its source state bits will still be
 *       set. There is no race-free way of unmasking and restoring the
 *       source. Thus this should only be used in interrupts that are
 *       also masked at the source, and only in cases where the
 *       interrupt is not meant to be used for a large amount of time,
 *       because no valid target exists for it, for example.
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

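/*
 * Usage sketch (illustrative values, not from PAPR): to route LISN
 * 0x1000 to vCPU 2 at priority 5 with guest EISN 0x42, the OS would
 * pass:
 *
 *   R4 = SPAPR_XIVE_SRC_SET_EISN, R5 = 0x1000, R6 = 2, R7 = 5,
 *   R8 = 0x42
 *
 * after which H_INT_GET_SOURCE_CONFIG on the same LISN returns that
 * (target, priority, eisn) triplet. A priority of 0xff instead
 * resets the EAS to a masked state.
 */
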
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags    = args[0];
    target_ulong lisn     = args[1];
    target_ulong target   = args[2];
    target_ulong priority = args[3];
    target_ulong eisn     = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}

/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
 * target/priority pair is assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_get_source_config(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_info(flags, target, priority);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ
 * for a given "target" and "priority".  It is also used to set the
 * notification config associated with the EQ.  An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

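/*
 * Worked example (illustrative values): to attach a 64K event queue
 * to a target at priority 5, the OS passes qsize = 16 and a
 * 64K-aligned qpage. The code below stores qsize - 12 = 4 in
 * END_W0_QSIZE, which corresponds to the 1 << (4 + 10) = 16K
 * four-byte entries used when dumping the END. qsize = 0 resets the
 * EQ and disables queueing.
 */
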
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
        xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *       Bits 0-61: Reserved
 *       Bit 62: The value of Event Queue Generation Number (g) per
 *               the XIVE spec if "Debug" = 1
 *       Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 *
 */

#define SPAPR_XIVE_END_DEBUG     PPC_BIT(63)

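/*
 * Worked example (illustrative): with the "Debug" flag set and a
 * queue whose generation bit is 1 and offset counter is 42, the code
 * below ORs the generation into the returned R4 flags (gen << 62)
 * and returns 42 in R7; without "Debug", R7 is 0.
 */
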
static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    trace_spapr_xive_get_queue_config(flags, target, priority);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(xive, priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}

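/*
 * Illustrative sketch (hypothetical helper, not used by the model):
 * the R6 value returned above is the log2 of the EQ size, W0 QSIZE
 * being biased by 12, so a 4K queue reports 12. A guest would
 * recover the queue size in bytes as below; 0 means the END has no
 * EQ backing it.
 */
static inline uint64_t G_GNUC_UNUSED
spapr_xive_eq_size_bytes(target_ulong r6_qsize)
{
    return r6_qsize ? 1ull << r6_qsize : 0;
}
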
/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread.  The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine".  Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags = args[0];

    trace_spapr_xive_set_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

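/*
 * Illustrative sketch (hypothetical helper, not used by the model):
 * per the description above, a guest clears its reporting cache line
 * pair by passing -1 in "reportingLine", so an eventual
 * implementation would need to distinguish that sentinel from a real
 * logical real address.
 */
static inline bool G_GNUC_UNUSED
spapr_xive_reporting_line_is_reset(target_ulong reporting_line)
{
    return reporting_line == (target_ulong)-1;
}
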
/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target".  If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *        cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags = args[0];

    trace_spapr_xive_get_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn".  This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (spapr_xive_in_kernel(xive)) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE),
                          MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}

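/*
 * Illustrative sketch (hypothetical helper, not used by the model):
 * the address computed in the emulated path of h_int_esb() above.
 * Each LISN has its ESB management page at a fixed offset from the
 * VC BAR, and "esbOffset" selects the load/store operation within
 * that page.
 */
static inline hwaddr G_GNUC_UNUSED
spapr_xive_esb_mgmt_addr(SpaprXive *xive, uint32_t lisn, hwaddr offset)
{
    return xive->vc_base + xive_source_esb_mgmt(&xive->source, lisn) + offset;
}
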
/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input lisn are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_sync(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if a H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM
     */

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state.  This
 * means losing all interrupt state previously set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    trace_spapr_xive_reset(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_cold_reset(DEVICE(xive));

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

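/*
 * Illustrative sketch (hypothetical helper, not used by the model):
 * after the cold reset above, the queue configuration set via
 * H_INT_SET_QUEUE_CONFIG is gone, which can be expressed as no END
 * being valid anymore.
 */
static inline bool G_GNUC_UNUSED
spapr_xive_endt_is_clear(SpaprXive *xive)
{
    uint32_t end_idx;

    for (end_idx = 0; end_idx < xive->nr_ends; end_idx++) {
        if (xive_end_is_valid(&xive->endt[end_idx])) {
            return false;
        }
    }
    return true;
}
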
void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}