1 /*
2 * QEMU PowerPC XIVE interrupt controller model
3 *
4 *
5 * The POWER9 processor comes with a new interrupt controller, called
6 * XIVE as "eXternal Interrupt Virtualization Engine".
7 *
8 * = Overall architecture
9 *
10 *
11 * XIVE Interrupt Controller
12 * +------------------------------------+ IPIs
13 * | +---------+ +---------+ +--------+ | +-------+
14 * | |VC | |CQ | |PC |----> | CORES |
15 * | | esb | | | | |----> | |
16 * | | eas | | Bridge | | tctx |----> | |
17 * | |SC end | | | | nvt | | | |
18 * +------+ | +---------+ +----+----+ +--------+ | +-+-+-+-+
19 * | RAM | +------------------|-----------------+ | | |
20 * | | | | | |
21 * | | | | | |
22 * | | +--------------------v------------------------v-v-v--+ other
23 * | <--+ Power Bus +--> chips
24 * | esb | +---------+-----------------------+------------------+
25 * | eas | | |
26 * | end | +--|------+ |
27 * | nvt | +----+----+ | +----+----+
28 * +------+ |SC | | |SC |
29 * | | | | |
30 * | PQ-bits | | | PQ-bits |
31 * | local |-+ | in VC |
32 * +---------+ +---------+
33 * PCIe NX,NPU,CAPI
34 *
35 * SC: Source Controller (aka. IVSE)
36 * VC: Virtualization Controller (aka. IVRE)
37 * PC: Presentation Controller (aka. IVPE)
38 * CQ: Common Queue (Bridge)
39 *
40 * PQ-bits: 2 bits source state machine (P:pending Q:queued)
41 * esb: Event State Buffer (Array of PQ bits in an IVSE)
42 * eas: Event Assignment Structure
43 * end: Event Notification Descriptor
44 * nvt: Notification Virtual Target
45 * tctx: Thread interrupt Context
46 *
47 *
48 * The XIVE IC is composed of three sub-engines :
49 *
50 * - Interrupt Virtualization Source Engine (IVSE), or Source
51 * Controller (SC). These are found in PCI PHBs, in the PSI host
52 * bridge controller, but also inside the main controller for the
53 * core IPIs and other sub-chips (NX, CAP, NPU) of the
54 * chip/processor. They are configured to feed the IVRE with events.
55 *
56 * - Interrupt Virtualization Routing Engine (IVRE) or Virtualization
57 * Controller (VC). Its job is to match an event source with an
58 * Event Notification Descriptor (END).
59 *
60 * - Interrupt Virtualization Presentation Engine (IVPE) or
61 * Presentation Controller (PC). It maintains the interrupt context
62 * state of each thread and handles the delivery of the external
63 * exception to the thread.
64 *
65 * In XIVE 1.0, the sub-engines used to be referred as:
66 *
67 * SC Source Controller
68 * VC Virtualization Controller
69 * PC Presentation Controller
70 * CQ Common Queue (PowerBUS Bridge)
71 *
72 *
73 * = XIVE internal tables
74 *
75 * Each of the sub-engines uses a set of tables to redirect exceptions
76 * from event sources to CPU threads.
77 *
78 * +-------+
79 * User or OS | EQ |
80 * or +------>|entries|
81 * Hypervisor | | .. |
82 * Memory | +-------+
83 * | ^
84 * | |
85 * +-------------------------------------------------+
86 * | |
87 * Hypervisor +------+ +---+--+ +---+--+ +------+
88 * Memory | ESB | | EAT | | ENDT | | NVTT |
89 * (skiboot) +----+-+ +----+-+ +----+-+ +------+
90 * ^ | ^ | ^ | ^
91 * | | | | | | |
92 * +-------------------------------------------------+
93 * | | | | | | |
94 * | | | | | | |
95 * +----|--|--------|--|--------|--|-+ +-|-----+ +------+
96 * | | | | | | | | | | tctx| |Thread|
97 * IPI or --> | + v + v + v |---| + .. |-----> |
98 * HW events --> | | | | | |
99 * IVSE | IVRE | | IVPE | +------+
100 * +---------------------------------+ +-------+
101 *
102 *
103 *
 * Each IVSE has a 2-bit state machine per source, P for pending and Q
 * for queued, which allows events to be triggered. The PQ bits are
 * stored in an Event State Buffer (ESB) array and can be controlled by
 * MMIOs.
107 *
108 * If the event is let through, the IVRE looks up in the Event Assignment
109 * Structure (EAS) table for an Event Notification Descriptor (END)
110 * configured for the source. Each Event Notification Descriptor defines
111 * a notification path to a CPU and an in-memory Event Queue, in which
112 * will be enqueued an EQ data for the OS to pull.
113 *
114 * The IVPE determines if a Notification Virtual Target (NVT) can
115 * handle the event by scanning the thread contexts of the VCPUs
116 * dispatched on the processor HW threads. It maintains the state of
117 * the thread interrupt context (TCTX) of each thread in a NVT table.
118 *
119 * = Acronyms
120 *
121 * Description In XIVE 1.0, used to be referred as
122 *
123 * EAS Event Assignment Structure IVE Interrupt Virt. Entry
124 * EAT Event Assignment Table IVT Interrupt Virt. Table
125 * ENDT Event Notif. Descriptor Table EQDT Event Queue Desc. Table
126 * EQ Event Queue same
127 * ESB Event State Buffer SBE State Bit Entry
128 * NVT Notif. Virtual Target VPD Virtual Processor Desc.
129 * NVTT Notif. Virtual Target Table VPDT Virtual Processor Desc. Table
130 * TCTX Thread interrupt Context
131 *
132 *
133 * Copyright (c) 2017-2024, IBM Corporation.
134 *
135 * SPDX-License-Identifier: GPL-2.0-or-later
136 */
137
138 #ifndef PPC_XIVE_H
139 #define PPC_XIVE_H
140
141 #include "system/kvm.h"
142 #include "hw/sysbus.h"
143 #include "hw/ppc/xive_regs.h"
144 #include "qom/object.h"
145
146 /*
147 * XIVE Notifier (Interface between Source and Router)
148 */
149
150 typedef struct XiveNotifier XiveNotifier;
151
152 #define TYPE_XIVE_NOTIFIER "xive-notifier"
153 #define XIVE_NOTIFIER(obj) \
154 INTERFACE_CHECK(XiveNotifier, (obj), TYPE_XIVE_NOTIFIER)
155 typedef struct XiveNotifierClass XiveNotifierClass;
156 DECLARE_CLASS_CHECKERS(XiveNotifierClass, XIVE_NOTIFIER,
157 TYPE_XIVE_NOTIFIER)
158
/* Interface used by an interrupt source to push events to its router */
struct XiveNotifierClass {
    InterfaceClass parent;
    /*
     * Forward an event for source @lisn to the router. @pq_checked
     * tells the receiver whether the PQ state bits have already been
     * checked by the caller (same contract as xive_router_notify()
     * below).
     */
    void (*notify)(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
};
163
164 /*
165 * XIVE Interrupt Source
166 */
167
168 #define TYPE_XIVE_SOURCE "xive-source"
169 OBJECT_DECLARE_SIMPLE_TYPE(XiveSource, XIVE_SOURCE)
170
171 /*
172 * XIVE Interrupt Source characteristics, which define how the ESB are
173 * controlled.
174 */
175 #define XIVE_SRC_H_INT_ESB 0x1 /* ESB managed with hcall H_INT_ESB */
176 #define XIVE_SRC_STORE_EOI 0x2 /* Store EOI supported */
177 #define XIVE_SRC_PQ_DISABLE 0x4 /* Disable check on the PQ state bits */
178
/*
 * State of one XIVE interrupt source controller (IVSE): the PQ state
 * bits of its sources and the MMIO regions exposing them.
 */
struct XiveSource {
    DeviceState parent;

    /* IRQs */
    uint32_t nr_irqs;        /* number of sources, indexed by srcno */
    unsigned long *lsi_map;  /* bitmap of level-sensitive (LSI) sources */

    /* PQ bits and LSI assertion bit */
    uint8_t *status;         /* one byte per source: PQ bits + ASSERTED */
    uint8_t reset_pq; /* PQ state on reset */

    /* ESB memory region */
    uint64_t esb_flags;      /* XIVE_SRC_* characteristics */
    uint32_t esb_shift;      /* log2 of the ESB span per source, XIVE_ESB_* */
    MemoryRegion esb_mmio;
    MemoryRegion esb_mmio_emulated;

    /* KVM support */
    void *esb_mmap;          /* host address of the ESB pages, presumably
                              * mmap'ed from the KVM device — see the
                              * kvmppc_xive_* helpers */
    MemoryRegion esb_mmio_kvm;

    XiveNotifier *xive;      /* notifier (router) receiving the events */
};
202
203 /*
204 * ESB MMIO setting. Can be one page, for both source triggering and
205 * source management, or two different pages. See below for magic
206 * values.
207 */
208 #define XIVE_ESB_4K 12 /* PSI HB only */
209 #define XIVE_ESB_4K_2PAGE 13
210 #define XIVE_ESB_64K 16
211 #define XIVE_ESB_64K_2PAGE 17
212
/*
 * Tell whether the ESB MMIO setting uses two pages per source
 * (separate trigger and management pages) rather than one.
 */
static inline bool xive_source_esb_has_2page(XiveSource *xsrc)
{
    switch (xsrc->esb_shift) {
    case XIVE_ESB_4K_2PAGE:
    case XIVE_ESB_64K_2PAGE:
        return true;
    default:
        return false;
    }
}
218
/* Total size in bytes of the ESB MMIO region covering all sources */
static inline uint64_t xive_source_esb_len(XiveSource *xsrc)
{
    uint64_t span_per_irq = 1ull << xsrc->esb_shift;

    return span_per_irq * xsrc->nr_irqs;
}
223
224 /* The trigger page is always the first/even page */
xive_source_esb_page(XiveSource * xsrc,uint32_t srcno)225 static inline hwaddr xive_source_esb_page(XiveSource *xsrc, uint32_t srcno)
226 {
227 assert(srcno < xsrc->nr_irqs);
228 return (1ull << xsrc->esb_shift) * srcno;
229 }
230
231 /* In a two pages ESB MMIO setting, the odd page is for management */
xive_source_esb_mgmt(XiveSource * xsrc,int srcno)232 static inline hwaddr xive_source_esb_mgmt(XiveSource *xsrc, int srcno)
233 {
234 hwaddr addr = xive_source_esb_page(xsrc, srcno);
235
236 if (xive_source_esb_has_2page(xsrc)) {
237 addr += (1 << (xsrc->esb_shift - 1));
238 }
239
240 return addr;
241 }
242
243 /*
244 * Each interrupt source has a 2-bit state machine which can be
245 * controlled by MMIO. P indicates that an interrupt is pending (has
246 * been sent to a queue and is waiting for an EOI). Q indicates that
247 * the interrupt has been triggered while pending.
248 *
249 * This acts as a coalescing mechanism in order to guarantee that a
250 * given interrupt only occurs at most once in a queue.
251 *
252 * When doing an EOI, the Q bit will indicate if the interrupt
253 * needs to be re-triggered.
254 */
255 #define XIVE_STATUS_ASSERTED 0x4 /* Extra bit for LSI */
256 #define XIVE_ESB_VAL_P 0x2
257 #define XIVE_ESB_VAL_Q 0x1
258
259 #define XIVE_ESB_RESET 0x0
260 #define XIVE_ESB_PENDING XIVE_ESB_VAL_P
261 #define XIVE_ESB_QUEUED (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q)
262 #define XIVE_ESB_OFF XIVE_ESB_VAL_Q
263
264 bool xive_esb_trigger(uint8_t *pq);
265 bool xive_esb_eoi(uint8_t *pq);
266 uint8_t xive_esb_set(uint8_t *pq, uint8_t value);
267
268 /*
269 * "magic" Event State Buffer (ESB) MMIO offsets.
270 *
271 * The following offsets into the ESB MMIO allow to read or manipulate
272 * the PQ bits. They must be used with an 8-byte load instruction.
273 * They all return the previous state of the interrupt (atomically).
274 *
275 * Additionally, some ESB pages support doing an EOI via a store and
276 * some ESBs support doing a trigger via a separate trigger page.
277 */
278 #define XIVE_ESB_STORE_EOI 0x400 /* Store */
279 #define XIVE_ESB_LOAD_EOI 0x000 /* Load */
280 #define XIVE_ESB_GET 0x800 /* Load */
281 #define XIVE_ESB_INJECT 0x800 /* Store */
282 #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
283 #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
284 #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
285 #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
286
287 uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno);
288 uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq);
289
290 /*
291 * Source status helpers
292 */
/* Set (@enable) or clear a status bit of source @srcno */
static inline void xive_source_set_status(XiveSource *xsrc, uint32_t srcno,
                                          uint8_t status, bool enable)
{
    uint8_t *sts = &xsrc->status[srcno];

    if (enable) {
        *sts |= status;
    } else {
        *sts &= ~status;
    }
}
302
/* Record the input level of an LSI source in its ASSERTED status bit */
static inline void xive_source_set_asserted(XiveSource *xsrc, uint32_t srcno,
                                            bool enable)
{
    xive_source_set_status(xsrc, srcno, XIVE_STATUS_ASSERTED, enable);
}
308
/* Report whether the ASSERTED status bit of source @srcno is set */
static inline bool xive_source_is_asserted(XiveSource *xsrc, uint32_t srcno)
{
    return (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) != 0;
}
313
314 void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset,
315 GString *buf);
316
/* Tell whether source @srcno is level-sensitive (LSI) */
static inline bool xive_source_irq_is_lsi(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    /* LSI sources are recorded in the lsi_map bitmap */
    return test_bit(srcno, xsrc->lsi_map) != 0;
}
322
/* Mark source @srcno as level-sensitive (LSI) */
static inline void xive_source_irq_set_lsi(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    bitmap_set(xsrc->lsi_map, srcno, 1);
}
328
329 void xive_source_set_irq(void *opaque, int srcno, int val);
330
331 /*
332 * XIVE Thread interrupt Management (TM) context
333 */
334
335 #define TYPE_XIVE_TCTX "xive-tctx"
336 OBJECT_DECLARE_SIMPLE_TYPE(XiveTCTX, XIVE_TCTX)
337
338 /*
339 * XIVE Thread interrupt Management register rings :
340 *
341 * QW-0 User event-based exception state
342 * QW-1 O/S OS context for priority management, interrupt acks
343 * QW-2 Pool hypervisor pool context for virtual processors dispatched
344 * QW-3 Physical physical thread context and security context
345 */
346 #define XIVE_TM_RING_COUNT 4
347 #define XIVE_TM_RING_SIZE 0x10
348
349 typedef struct XivePresenter XivePresenter;
350
/* Thread interrupt Management context: one per CPU thread */
struct XiveTCTX {
    DeviceState parent_obj;

    CPUState *cs;        /* CPU thread owning this context */
    qemu_irq hv_output;  /* hypervisor exception output line */
    qemu_irq os_output;  /* O/S exception output line */

    /* The four TM rings (User, O/S, Pool, Physical), 16 bytes each */
    uint8_t regs[XIVE_TM_RING_COUNT * XIVE_TM_RING_SIZE];

    XivePresenter *xptr; /* presenter this context is attached to */
};
362
xive_tctx_word2(uint8_t * ring)363 static inline uint32_t xive_tctx_word2(uint8_t *ring)
364 {
365 return *((uint32_t *) &ring[TM_WORD2]);
366 }
367
368 /*
369 * XIVE Router
370 */
371 typedef struct XiveFabric XiveFabric;
372
/* XIVE routing engine (IVRE) device state */
struct XiveRouter {
    SysBusDevice parent;

    XiveFabric *xfb;  /* XIVE fabric interface of the machine */
};
378
379 #define TYPE_XIVE_ROUTER "xive-router"
380 OBJECT_DECLARE_TYPE(XiveRouter, XiveRouterClass,
381 XIVE_ROUTER)
382
struct XiveRouterClass {
    SysBusDeviceClass parent;

    /*
     * XIVE table accessors. NOTE(review): the int return value looks
     * like 0 on success / negative on error (bad block or index) --
     * confirm against the concrete router implementations.
     */
    /* Fetch the EAS (source -> END routing entry) @eas_blk/@eas_idx */
    int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                   XiveEAS *eas);
    /* Read the PQ state bits of source @eas_blk/@eas_idx into @pq */
    int (*get_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                  uint8_t *pq);
    /* Update the PQ state bits of source @eas_blk/@eas_idx from @pq */
    int (*set_pq)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                  uint8_t *pq);
    /* Fetch the Event Notification Descriptor @end_blk/@end_idx */
    int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                   XiveEND *end);
    /* Write back an END; @word_number presumably selects the word */
    int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                     XiveEND *end, uint8_t word_number);
    /* Fetch the Notification Virtual Target @nvt_blk/@nvt_idx */
    int (*get_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                   XiveNVT *nvt);
    /* Write back an NVT; @word_number presumably selects the word */
    int (*write_nvt)(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                     XiveNVT *nvt, uint8_t word_number);
    /* Return the block id owned by this router */
    uint8_t (*get_block_id)(XiveRouter *xrtr);
    /* Forward an event to the END designated by @eas */
    void (*end_notify)(XiveRouter *xrtr, XiveEAS *eas);
};
404
405 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
406 XiveEAS *eas);
407 int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
408 XiveEND *end);
409 int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
410 XiveEND *end, uint8_t word_number);
411 int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
412 XiveNVT *nvt);
413 int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
414 XiveNVT *nvt, uint8_t word_number);
415 void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
416 void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
417
418 /*
419 * XIVE Presenter
420 */
421
/* Result of a thread interrupt context match during presentation */
typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;   /* matched thread context -- presumably NULL if none */
    uint8_t ring;     /* TM ring that matched */
    bool precluded;   /* delivery was precluded; see xive_presenter_notify */
} XiveTCTXMatch;
427
428 #define TYPE_XIVE_PRESENTER "xive-presenter"
429 #define XIVE_PRESENTER(obj) \
430 INTERFACE_CHECK(XivePresenter, (obj), TYPE_XIVE_PRESENTER)
431 typedef struct XivePresenterClass XivePresenterClass;
432 DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER,
433 TYPE_XIVE_PRESENTER)
434
435 #define XIVE_PRESENTER_GEN1_TIMA_OS 0x1
436
struct XivePresenterClass {
    InterfaceClass parent;
    /*
     * Scan the thread contexts for one that can handle an event for
     * NVT @nvt_blk/@nvt_idx at @priority. NOTE(review): @cam_ignore
     * and @crowd look like group/crowd match requests (names only) --
     * confirm against the implementations.
     */
    int (*match_nvt)(XivePresenter *xptr, uint8_t format,
                     uint8_t nvt_blk, uint32_t nvt_idx,
                     bool crowd, bool cam_ignore, uint8_t priority,
                     uint32_t logic_serv, XiveTCTXMatch *match);
    /* True when the presenter is backed by the in-kernel KVM device */
    bool (*in_kernel)(const XivePresenter *xptr);
    /* Return XIVE_PRESENTER_* configuration flags */
    uint32_t (*get_config)(XivePresenter *xptr);
    /* Present the event to a whole NVT group/crowd */
    int (*broadcast)(XivePresenter *xptr,
                     uint8_t nvt_blk, uint32_t nvt_idx,
                     bool crowd, bool cam_ignore, uint8_t priority);
};
449
450 int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
451 uint8_t format,
452 uint8_t nvt_blk, uint32_t nvt_idx,
453 bool cam_ignore, uint32_t logic_serv);
454 bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
455 uint8_t nvt_blk, uint32_t nvt_idx,
456 bool crowd, bool cam_ignore, uint8_t priority,
457 uint32_t logic_serv, bool *precluded);
458
459 uint32_t xive_get_vpgroup_size(uint32_t nvp_index);
460
461 /*
462 * XIVE Fabric (Interface between Interrupt Controller and Machine)
463 */
464
465 #define TYPE_XIVE_FABRIC "xive-fabric"
466 #define XIVE_FABRIC(obj) \
467 INTERFACE_CHECK(XiveFabric, (obj), TYPE_XIVE_FABRIC)
468 typedef struct XiveFabricClass XiveFabricClass;
469 DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC,
470 TYPE_XIVE_FABRIC)
471
/*
 * Machine-level interface dispatching presentation requests to the
 * interrupt controllers of the machine.
 */
struct XiveFabricClass {
    InterfaceClass parent;
    /* Match an NVT across the presenters of the machine */
    int (*match_nvt)(XiveFabric *xfb, uint8_t format,
                     uint8_t nvt_blk, uint32_t nvt_idx,
                     bool crowd, bool cam_ignore, uint8_t priority,
                     uint32_t logic_serv, XiveTCTXMatch *match);
    /* Present the event to a whole NVT group/crowd, machine-wide */
    int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx,
                     bool crowd, bool cam_ignore, uint8_t priority);
};
481
482 /*
483 * XIVE END ESBs
484 */
485
486 #define TYPE_XIVE_END_SOURCE "xive-end-source"
487 OBJECT_DECLARE_SIMPLE_TYPE(XiveENDSource, XIVE_END_SOURCE)
488
/* Device exposing the ESBs of the ENDs of a router through MMIO */
struct XiveENDSource {
    DeviceState parent;

    uint32_t nr_ends;    /* number of ENDs exposed */

    /* ESB memory region */
    uint32_t esb_shift;  /* log2 of the ESB span per END */
    MemoryRegion esb_mmio;

    XiveRouter *xrtr;    /* router owning the END table */
};
500
501 /*
502 * For legacy compatibility, the exceptions define up to 256 different
503 * priorities. P9 implements only 9 levels : 8 active levels [0 - 7]
504 * and the least favored level 0xFF.
505 */
506 #define XIVE_PRIORITY_MAX 7
507
508 /*
509 * Convert a priority number to an Interrupt Pending Buffer (IPB)
510 * register, which indicates a pending interrupt at the priority
511 * corresponding to the bit number
512 */
xive_priority_to_ipb(uint8_t priority)513 static inline uint8_t xive_priority_to_ipb(uint8_t priority)
514 {
515 return priority > XIVE_PRIORITY_MAX ?
516 0 : 1 << (XIVE_PRIORITY_MAX - priority);
517 }
518
xive_priority_to_pipr(uint8_t priority)519 static inline uint8_t xive_priority_to_pipr(uint8_t priority)
520 {
521 return priority > XIVE_PRIORITY_MAX ? 0xFF : priority;
522 }
523
524 /*
525 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
526 * Interrupt Priority Register (PIPR), which contains the priority of
527 * the most favored pending notification.
528 */
/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR) value: the bit position of the
 * most significant pending bit, i.e. the most favored priority, or
 * 0xff when nothing is pending.
 */
static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
{
    if (!ibp) {
        return 0xff;
    }
    /* Shift into the top byte so clz32 yields the 0-7 bit position */
    return clz32((uint32_t)ibp << 24);
}
533
/*
 * XIVE Thread Interrupt Management Area (TIMA)
 *
 * This region gives access to the registers of the thread interrupt
 * management context. It is four pages wide, each page providing a
 * different view of the registers. The page with the lowest offset is
 * the most privileged and gives access to the entire context.
 */
542 #define XIVE_TM_HW_PAGE 0x0
543 #define XIVE_TM_HV_PAGE 0x1
544 #define XIVE_TM_OS_PAGE 0x2
545 #define XIVE_TM_USER_PAGE 0x3
546
547 void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
548 uint64_t value, unsigned size);
549 uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
550 unsigned size);
551
552 void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf);
553 Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
554 void xive_tctx_reset(XiveTCTX *tctx);
555 void xive_tctx_destroy(XiveTCTX *tctx);
556 void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
557 uint8_t group_level);
558 void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
559 void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level);
560
561 /*
562 * KVM XIVE device helpers
563 */
564
565 int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp);
566 void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val);
567 int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp);
568 int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp);
569 int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp);
570 int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp);
571
572 #endif /* PPC_XIVE_H */
573