1 /*
2 * QEMU PowerPC XIVE interrupt controller model
3 *
4 * Copyright (c) 2017-2018, IBM Corporation.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/module.h"
12 #include "qapi/error.h"
13 #include "target/ppc/cpu.h"
14 #include "system/cpus.h"
15 #include "system/dma.h"
16 #include "system/reset.h"
17 #include "hw/qdev-properties.h"
18 #include "migration/vmstate.h"
19 #include "hw/irq.h"
20 #include "hw/ppc/xive.h"
21 #include "hw/ppc/xive2.h"
22 #include "hw/ppc/xive_regs.h"
23 #include "trace.h"
24
25 /*
26 * XIVE Thread Interrupt Management context
27 */
28
static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
30 {
31 switch (ring) {
32 case TM_QW0_USER:
33 return 0; /* Not supported */
34 case TM_QW1_OS:
35 return tctx->os_output;
36 case TM_QW2_HV_POOL:
37 case TM_QW3_HV_PHYS:
38 return tctx->hv_output;
39 default:
40 return 0;
41 }
42 }
43
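/*
 * ACK the pending interrupt for a ring: lower the signal line, update
 * CPPR/IPB/NSR, and return the (NSR << 8) | CPPR value that the ACK
 * load returns to software.
 */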
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
45 {
46 uint8_t *regs = &tctx->regs[ring];
47 uint8_t nsr = regs[TM_NSR];
48
49 qemu_irq_lower(xive_tctx_output(tctx, ring));
50
51 if (regs[TM_NSR] != 0) {
52 uint8_t cppr = regs[TM_PIPR];
53 uint8_t alt_ring;
54 uint8_t *alt_regs;
55
56 /* POOL interrupt uses IPB in QW2, POOL ring */
57 if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) {
58 alt_ring = TM_QW2_HV_POOL;
59 } else {
60 alt_ring = ring;
61 }
62 alt_regs = &tctx->regs[alt_ring];
63
64 regs[TM_CPPR] = cppr;
65
66 /*
67 * If the interrupt was for a specific VP, reset the pending
68 * buffer bit, otherwise clear the logical server indicator
69 */
70 if (regs[TM_NSR] & TM_NSR_GRP_LVL) {
71 regs[TM_NSR] &= ~TM_NSR_GRP_LVL;
72 } else {
73 alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
74 }
75
76 /* Drop the exception bit and any group/crowd */
77 regs[TM_NSR] = 0;
78
79 trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
80 alt_regs[TM_IPB], regs[TM_PIPR],
81 regs[TM_CPPR], regs[TM_NSR]);
82 }
83
84 return ((uint64_t)nsr << 8) | regs[TM_CPPR];
85 }
86
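/*
 * Raise the signal line for a ring when its PIPR becomes more favored
 * (lower) than the CPPR, recording the exception reason and group
 * level in the NSR.
 */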
void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
88 {
89 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
90 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
91 uint8_t *alt_regs = &tctx->regs[alt_ring];
92 uint8_t *regs = &tctx->regs[ring];
93
94 if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
95 switch (ring) {
96 case TM_QW1_OS:
97 regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
98 break;
99 case TM_QW2_HV_POOL:
100 alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
101 break;
102 case TM_QW3_HV_PHYS:
103 regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
104 break;
105 default:
106 g_assert_not_reached();
107 }
108 trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
109 regs[TM_IPB], alt_regs[TM_PIPR],
110 alt_regs[TM_CPPR], alt_regs[TM_NSR]);
111 qemu_irq_raise(xive_tctx_output(tctx, ring));
112 }
113 }
114
void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
116 {
117 /*
118 * Lower the External interrupt. Used when pulling a context. It is
119 * necessary to avoid catching it in the higher privilege context. It
120 * should be raised again when re-pushing the lower privilege context.
121 */
122 qemu_irq_lower(xive_tctx_output(tctx, ring));
123 }
124
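/*
 * Update the CPPR of a ring, recompute the PIPR from the pending
 * interrupt buffers, and check whether an exception must be raised.
 */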
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
126 {
127 uint8_t *regs = &tctx->regs[ring];
128 uint8_t pipr_min;
129 uint8_t ring_min;
130
131 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
132 regs[TM_IPB], regs[TM_PIPR],
133 cppr, regs[TM_NSR]);
134
135 if (cppr > XIVE_PRIORITY_MAX) {
136 cppr = 0xff;
137 }
138
139 tctx->regs[ring + TM_CPPR] = cppr;
140
141 /*
142 * Recompute the PIPR based on local pending interrupts. The PHYS
143 * ring must take the minimum of both the PHYS and POOL PIPR values.
144 */
145 pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
146 ring_min = ring;
147
148 /* PHYS updates also depend on POOL values */
149 if (ring == TM_QW3_HV_PHYS) {
150 uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL];
151
152 /* POOL values only matter if POOL ctx is valid */
153 if (pool_regs[TM_WORD2] & 0x80) {
154
155 uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]);
156
157 /*
158 * Determine highest priority interrupt and
159 * remember which ring has it.
160 */
161 if (pool_pipr < pipr_min) {
162 pipr_min = pool_pipr;
163 ring_min = TM_QW2_HV_POOL;
164 }
165 }
166 }
167
168 regs[TM_PIPR] = pipr_min;
169
170 /* CPPR has changed, check if we need to raise a pending exception */
171 xive_tctx_notify(tctx, ring_min, 0);
172 }
173
void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
175 uint8_t group_level)
176 {
177 /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
178 uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
179 uint8_t *alt_regs = &tctx->regs[alt_ring];
180 uint8_t *regs = &tctx->regs[ring];
181
182 if (group_level == 0) {
183 /* VP-specific */
184 regs[TM_IPB] |= xive_priority_to_ipb(priority);
185 alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
186 } else {
187 /* VP-group */
188 alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
189 }
190 xive_tctx_notify(tctx, ring, group_level);
191 }
192
193 /*
194 * XIVE Thread Interrupt Management Area (TIMA)
195 */
196
static void xive_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
198 hwaddr offset, uint64_t value, unsigned size)
199 {
200 xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
201 }
202
static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
204 hwaddr offset, unsigned size)
205 {
206 return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
207 }
208
static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
210 hwaddr offset, unsigned size)
211 {
212 uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
213 uint32_t qw2w2;
214
215 qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
216 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
217 return qw2w2;
218 }
219
static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
221 hwaddr offset, unsigned size)
222 {
223 uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
224 uint8_t qw3b8;
225
226 qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
227 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
228 return qw3b8;
229 }
230
static void xive_tm_vt_push(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
232 uint64_t value, unsigned size)
233 {
234 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
235 }
236
static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
238 hwaddr offset, unsigned size)
239 {
240 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
241 }
242
243 /*
244 * Define an access map for each page of the TIMA that we will use in
245 * the memory region ops to filter values when doing loads and stores
 * of raw register values
 *
 * Register accessibility bits:
249 *
250 * 0x0 - no access
251 * 0x1 - write only
252 * 0x2 - read only
253 * 0x3 - read/write
254 */
255
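/*
 * Each table below has one row per ring (QW) with one entry per
 * register byte, and is indexed by the TIMA register offset.
 */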
256 static const uint8_t xive_tm_hw_view[] = {
257 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
258 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
259 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
260 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
261 };
262
263 static const uint8_t xive_tm_hv_view[] = {
264 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
265 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
266 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
267 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
268 };
269
270 static const uint8_t xive_tm_os_view[] = {
271 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
272 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
273 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
274 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
275 };
276
277 static const uint8_t xive_tm_user_view[] = {
278 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
280 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
281 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
282 };
283
284 /*
285 * Overall TIMA access map for the thread interrupt management context
286 * registers
287 */
288 static const uint8_t *xive_tm_views[] = {
289 [XIVE_TM_HW_PAGE] = xive_tm_hw_view,
290 [XIVE_TM_HV_PAGE] = xive_tm_hv_view,
291 [XIVE_TM_OS_PAGE] = xive_tm_os_view,
292 [XIVE_TM_USER_PAGE] = xive_tm_user_view,
293 };
294
295 /*
296 * Computes a register access mask for a given offset in the TIMA
297 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
299 {
300 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
301 uint8_t reg_offset = offset & TM_REG_OFFSET;
302 uint8_t reg_mask = write ? 0x1 : 0x2;
303 uint64_t mask = 0x0;
304 int i;
305
306 for (i = 0; i < size; i++) {
307 if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
308 mask |= (uint64_t) 0xff << (8 * (size - i - 1));
309 }
310 }
311
312 return mask;
313 }
314
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
316 unsigned size)
317 {
318 uint8_t ring_offset = offset & TM_RING_OFFSET;
319 uint8_t reg_offset = offset & TM_REG_OFFSET;
320 uint64_t mask = xive_tm_mask(offset, size, true);
321 int i;
322
323 /*
324 * Only 4 or 8 bytes stores are allowed and the User ring is
325 * excluded
326 */
327 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
328 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
329 HWADDR_PRIx"\n", offset);
330 return;
331 }
332
333 /*
334 * Use the register offset for the raw values and filter out
335 * reserved values
336 */
337 for (i = 0; i < size; i++) {
338 uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
339 if (byte_mask) {
340 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
341 byte_mask;
342 }
343 }
344 }
345
static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
347 {
348 uint8_t ring_offset = offset & TM_RING_OFFSET;
349 uint8_t reg_offset = offset & TM_REG_OFFSET;
350 uint64_t mask = xive_tm_mask(offset, size, false);
351 uint64_t ret;
352 int i;
353
354 /*
355 * Only 4 or 8 bytes loads are allowed and the User ring is
356 * excluded
357 */
358 if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
359 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
360 HWADDR_PRIx"\n", offset);
361 return -1;
362 }
363
364 /* Use the register offset for the raw values */
365 ret = 0;
366 for (i = 0; i < size; i++) {
367 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
368 }
369
370 /* filter out reserved values */
371 return ret & mask;
372 }
373
374 /*
375 * The TM context is mapped twice within each page. Stores and loads
376 * to the first mapping below 2K write and read the specified values
377 * without modification. The second mapping above 2K performs specific
378 * state changes (side effects) in addition to setting/returning the
379 * interrupt management area context of the processor thread.
380 */
static uint64_t xive_tm_ack_os_reg(XivePresenter *xptr, XiveTCTX *tctx,
382 hwaddr offset, unsigned size)
383 {
384 return xive_tctx_accept(tctx, TM_QW1_OS);
385 }
386
static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
388 hwaddr offset, uint64_t value, unsigned size)
389 {
390 xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
391 }
392
static void xive_tctx_set_lgs(XiveTCTX *tctx, uint8_t ring, uint8_t lgs)
394 {
395 uint8_t *regs = &tctx->regs[ring];
396
397 regs[TM_LGS] = lgs;
398 }
399
static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
401 hwaddr offset, uint64_t value, unsigned size)
402 {
403 xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff);
404 }
405
406 /*
407 * Adjust the PIPR to allow a CPU to process event queues of other
408 * priorities during one physical interrupt cycle.
409 */
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
411 hwaddr offset, uint64_t value, unsigned size)
412 {
413 xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0);
414 }
415
static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
417 uint32_t *nvt_idx, bool *vo)
418 {
419 if (nvt_blk) {
420 *nvt_blk = xive_nvt_blk(cam);
421 }
422 if (nvt_idx) {
423 *nvt_idx = xive_nvt_idx(cam);
424 }
425 if (vo) {
426 *vo = !!(cam & TM_QW1W2_VO);
427 }
428 }
429
static uint32_t xive_tctx_get_os_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
431 uint32_t *nvt_idx, bool *vo)
432 {
433 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
434 uint32_t cam = be32_to_cpu(qw1w2);
435
436 xive_os_cam_decode(cam, nvt_blk, nvt_idx, vo);
437 return qw1w2;
438 }
439
static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t qw1w2)
441 {
442 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
443 }
444
static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
446 hwaddr offset, unsigned size)
447 {
448 uint32_t qw1w2;
449 uint32_t qw1w2_new;
450 uint8_t nvt_blk;
451 uint32_t nvt_idx;
452 bool vo;
453
454 qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);
455
456 if (!vo) {
457 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
458 nvt_blk, nvt_idx);
459 }
460
461 /* Invalidate CAM line */
462 qw1w2_new = xive_set_field32(TM_QW1W2_VO, qw1w2, 0);
463 xive_tctx_set_os_cam(tctx, qw1w2_new);
464
465 xive_tctx_reset_signal(tctx, TM_QW1_OS);
466 return qw1w2;
467 }
468
static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
470 uint8_t nvt_blk, uint32_t nvt_idx)
471 {
472 XiveNVT nvt;
473 uint8_t ipb;
474
475 /*
476 * Grab the associated NVT to pull the pending bits, and merge
477 * them with the IPB of the thread interrupt context registers
478 */
479 if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
480 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVT %x/%x\n",
481 nvt_blk, nvt_idx);
482 return;
483 }
484
485 ipb = xive_get_field32(NVT_W4_IPB, nvt.w4);
486
487 if (ipb) {
488 /* Reset the NVT value */
489 nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
490 xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
491
492 uint8_t *regs = &tctx->regs[TM_QW1_OS];
493 regs[TM_IPB] |= ipb;
494 }
495
496 /*
 * Always call xive_tctx_pipr_update(). Even if no escalation was
 * triggered, there could be a pending interrupt which
499 * was saved when the context was pulled and that we need to take
500 * into account by recalculating the PIPR (which is not
501 * saved/restored).
502 * It will also raise the External interrupt signal if needed.
503 */
504 xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */
505 }
506
507 /*
508 * Updating the OS CAM line can trigger a resend of interrupt
509 */
static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
511 hwaddr offset, uint64_t value, unsigned size)
512 {
513 uint32_t cam = value;
514 uint32_t qw1w2 = cpu_to_be32(cam);
515 uint8_t nvt_blk;
516 uint32_t nvt_idx;
517 bool vo;
518
519 xive_os_cam_decode(cam, &nvt_blk, &nvt_idx, &vo);
520
521 /* First update the registers */
522 xive_tctx_set_os_cam(tctx, qw1w2);
523
524 /* Check the interrupt pending bits */
525 if (vo) {
526 xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
527 }
528 }
529
static uint32_t xive_presenter_get_config(XivePresenter *xptr)
531 {
532 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
533
534 return xpc->get_config(xptr);
535 }
536
537 /*
538 * Define a mapping of "special" operations depending on the TIMA page
539 * offset and the size of the operation.
540 */
541 typedef struct XiveTmOp {
542 uint8_t page_offset;
543 uint32_t op_offset;
544 unsigned size;
545 void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
546 hwaddr offset,
547 uint64_t value, unsigned size);
548 uint64_t (*read_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
549 unsigned size);
550 } XiveTmOp;
551
552 static const XiveTmOp xive_tm_operations[] = {
553 /*
554 * MMIOs below 2K : raw values and special operations without side
555 * effects
556 */
557 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
558 NULL },
559 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
560 NULL },
561 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
562 NULL },
563 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
564 NULL },
565 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
566 xive_tm_vt_poll },
567
568 /* MMIOs above 2K : special operations with side effects */
569 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
570 xive_tm_ack_os_reg },
571 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
572 NULL },
573 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
574 xive_tm_pull_os_ctx },
575 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
576 xive_tm_pull_os_ctx },
577 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
578 xive_tm_ack_hv_reg },
579 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
580 xive_tm_pull_pool_ctx },
581 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
582 xive_tm_pull_pool_ctx },
583 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
584 xive_tm_pull_phys_ctx },
585 };
586
587 static const XiveTmOp xive2_tm_operations[] = {
588 /*
589 * MMIOs below 2K : raw values and special operations without side
590 * effects
591 */
592 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr,
593 NULL },
594 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
595 NULL },
596 { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
597 NULL },
598 { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
599 NULL },
600 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr,
601 NULL },
602 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
603 NULL },
604 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
605 xive_tm_vt_poll },
606 { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
607 NULL },
608
609 /* MMIOs above 2K : special operations with side effects */
610 { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
611 xive_tm_ack_os_reg },
612 { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
613 NULL },
614 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
615 xive2_tm_pull_os_ctx },
616 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
617 xive2_tm_pull_os_ctx },
618 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
619 xive2_tm_pull_os_ctx },
620 { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
621 xive_tm_ack_hv_reg },
622 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
623 xive_tm_pull_pool_ctx },
624 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
625 xive_tm_pull_pool_ctx },
626 { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
627 xive_tm_pull_pool_ctx },
628 { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
629 NULL },
630 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
631 xive_tm_pull_phys_ctx },
632 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
633 xive_tm_pull_phys_ctx },
634 { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
635 NULL },
636 };
637
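/*
 * Look up a "special" TIMA operation matching the access offset, size
 * and direction. Accesses done from a more privileged TIMA page also
 * match operations defined for less privileged pages.
 */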
static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
639 unsigned size, bool write)
640 {
641 uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
642 uint32_t op_offset = offset & TM_ADDRESS_MASK;
643 const XiveTmOp *tm_ops;
644 int i, tm_ops_count;
645 uint32_t cfg;
646
647 cfg = xive_presenter_get_config(xptr);
648 if (cfg & XIVE_PRESENTER_GEN1_TIMA_OS) {
649 tm_ops = xive_tm_operations;
650 tm_ops_count = ARRAY_SIZE(xive_tm_operations);
651 } else {
652 tm_ops = xive2_tm_operations;
653 tm_ops_count = ARRAY_SIZE(xive2_tm_operations);
654 }
655
656 for (i = 0; i < tm_ops_count; i++) {
657 const XiveTmOp *xto = &tm_ops[i];
658
/* Accesses done from a more privileged TIMA page are allowed */
660 if (xto->page_offset >= page_offset &&
661 xto->op_offset == op_offset &&
662 xto->size == size &&
663 ((write && xto->write_handler) || (!write && xto->read_handler))) {
664 return xto;
665 }
666 }
667 return NULL;
668 }
669
670 /*
671 * TIMA MMIO handlers
672 */
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
674 uint64_t value, unsigned size)
675 {
676 const XiveTmOp *xto;
677
678 trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value);
679
680 /*
681 * TODO: check V bit in Q[0-3]W2
682 */
683
684 /*
685 * First, check for special operations in the 2K region
686 */
687 if (offset & TM_SPECIAL_OP) {
688 xto = xive_tm_find_op(tctx->xptr, offset, size, true);
689 if (!xto) {
690 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
691 "@%"HWADDR_PRIx"\n", offset);
692 } else {
693 xto->write_handler(xptr, tctx, offset, value, size);
694 }
695 return;
696 }
697
698 /*
699 * Then, for special operations in the region below 2K.
700 */
701 xto = xive_tm_find_op(tctx->xptr, offset, size, true);
702 if (xto) {
703 xto->write_handler(xptr, tctx, offset, value, size);
704 return;
705 }
706
707 /*
708 * Finish with raw access to the register values
709 */
710 xive_tm_raw_write(tctx, offset, value, size);
711 }
712
uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
714 unsigned size)
715 {
716 const XiveTmOp *xto;
717 uint64_t ret;
718
719 /*
720 * TODO: check V bit in Q[0-3]W2
721 */
722
723 /*
724 * First, check for special operations in the 2K region
725 */
726 if (offset & TM_SPECIAL_OP) {
727 xto = xive_tm_find_op(tctx->xptr, offset, size, false);
728 if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
730 "@%"HWADDR_PRIx"\n", offset);
731 return -1;
732 }
733 ret = xto->read_handler(xptr, tctx, offset, size);
734 goto out;
735 }
736
737 /*
738 * Then, for special operations in the region below 2K.
739 */
740 xto = xive_tm_find_op(tctx->xptr, offset, size, false);
741 if (xto) {
742 ret = xto->read_handler(xptr, tctx, offset, size);
743 goto out;
744 }
745
746 /*
747 * Finish with raw access to the register values
748 */
749 ret = xive_tm_raw_read(tctx, offset, size);
750 out:
751 trace_xive_tctx_tm_read(tctx->cs->cpu_index, offset, size, ret);
752 return ret;
753 }
754
static char *xive_tctx_ring_print(uint8_t *ring)
756 {
757 uint32_t w2 = xive_tctx_word2(ring);
758
759 return g_strdup_printf("%02x %02x %02x %02x %02x "
760 "%02x %02x %02x %08x",
761 ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB],
762 ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR],
763 be32_to_cpu(w2));
764 }
765
766 static const char * const xive_tctx_ring_names[] = {
767 "USER", "OS", "POOL", "PHYS",
768 };
769
770 /*
771 * kvm_irqchip_in_kernel() will cause the compiler to turn this
 * into a nop if CONFIG_KVM isn't defined.
773 */
774 #define xive_in_kernel(xptr) \
775 (kvm_irqchip_in_kernel() && \
776 ({ \
777 XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); \
778 xpc->in_kernel ? xpc->in_kernel(xptr) : false; \
779 }))
780
void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf)
782 {
783 int cpu_index;
784 int i;
785
786 /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
787 * are hot plugged or unplugged.
788 */
789 if (!tctx) {
790 return;
791 }
792
793 cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
794
795 if (xive_in_kernel(tctx->xptr)) {
796 Error *local_err = NULL;
797
798 kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
799 if (local_err) {
800 error_report_err(local_err);
801 return;
802 }
803 }
804
805 if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) {
806 g_string_append_printf(buf, "CPU[%04x]: "
807 "QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
808 " W2\n", cpu_index);
809 } else {
810 g_string_append_printf(buf, "CPU[%04x]: "
811 "QW NSR CPPR IPB LSMFB - LGS T PIPR"
812 " W2\n", cpu_index);
813 }
814
815 for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
816 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
817 g_string_append_printf(buf, "CPU[%04x]: %4s %s\n",
818 cpu_index, xive_tctx_ring_names[i], s);
819 g_free(s);
820 }
821 }
822
void xive_tctx_reset(XiveTCTX *tctx)
824 {
825 memset(tctx->regs, 0, sizeof(tctx->regs));
826
827 /* Set some defaults */
828 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
829 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
830 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;
831 if (!(xive_presenter_get_config(tctx->xptr) &
832 XIVE_PRESENTER_GEN1_TIMA_OS)) {
833 tctx->regs[TM_QW1_OS + TM_OGEN] = 2;
834 }
835
836 /*
837 * Initialize PIPR to 0xFF to avoid phantom interrupts when the
838 * CPPR is first set.
839 */
840 tctx->regs[TM_QW1_OS + TM_PIPR] =
841 xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
842 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
843 xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
844 }
845
static void xive_tctx_realize(DeviceState *dev, Error **errp)
847 {
848 XiveTCTX *tctx = XIVE_TCTX(dev);
849 PowerPCCPU *cpu;
850 CPUPPCState *env;
851
852 assert(tctx->cs);
853 assert(tctx->xptr);
854
855 cpu = POWERPC_CPU(tctx->cs);
856 env = &cpu->env;
857 switch (PPC_INPUT(env)) {
858 case PPC_FLAGS_INPUT_POWER9:
859 tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT);
860 tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
861 break;
862
863 default:
864 error_setg(errp, "XIVE interrupt controller does not support "
865 "this CPU bus model");
866 return;
867 }
868
869 /* Connect the presenter to the VCPU (required for CPU hotplug) */
870 if (xive_in_kernel(tctx->xptr)) {
871 if (kvmppc_xive_cpu_connect(tctx, errp) < 0) {
872 return;
873 }
874 }
875 }
876
static int vmstate_xive_tctx_pre_save(void *opaque)
878 {
879 XiveTCTX *tctx = XIVE_TCTX(opaque);
880 Error *local_err = NULL;
881 int ret;
882
883 if (xive_in_kernel(tctx->xptr)) {
884 ret = kvmppc_xive_cpu_get_state(tctx, &local_err);
885 if (ret < 0) {
886 error_report_err(local_err);
887 return ret;
888 }
889 }
890
891 return 0;
892 }
893
static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
895 {
896 XiveTCTX *tctx = XIVE_TCTX(opaque);
897 Error *local_err = NULL;
898 int ret;
899
900 if (xive_in_kernel(tctx->xptr)) {
901 /*
902 * Required for hotplugged CPU, for which the state comes
903 * after all states of the machine.
904 */
905 ret = kvmppc_xive_cpu_set_state(tctx, &local_err);
906 if (ret < 0) {
907 error_report_err(local_err);
908 return ret;
909 }
910 }
911
912 return 0;
913 }
914
915 static const VMStateDescription vmstate_xive_tctx = {
916 .name = TYPE_XIVE_TCTX,
917 .version_id = 1,
918 .minimum_version_id = 1,
919 .pre_save = vmstate_xive_tctx_pre_save,
920 .post_load = vmstate_xive_tctx_post_load,
921 .fields = (const VMStateField[]) {
922 VMSTATE_BUFFER(regs, XiveTCTX),
923 VMSTATE_END_OF_LIST()
924 },
925 };
926
927 static const Property xive_tctx_properties[] = {
928 DEFINE_PROP_LINK("cpu", XiveTCTX, cs, TYPE_CPU, CPUState *),
929 DEFINE_PROP_LINK("presenter", XiveTCTX, xptr, TYPE_XIVE_PRESENTER,
930 XivePresenter *),
931 };
932
static void xive_tctx_class_init(ObjectClass *klass, const void *data)
934 {
935 DeviceClass *dc = DEVICE_CLASS(klass);
936
937 dc->desc = "XIVE Interrupt Thread Context";
938 dc->realize = xive_tctx_realize;
939 dc->vmsd = &vmstate_xive_tctx;
940 device_class_set_props(dc, xive_tctx_properties);
941 /*
942 * Reason: part of XIVE interrupt controller, needs to be wired up
943 * by xive_tctx_create().
944 */
945 dc->user_creatable = false;
946 }
947
948 static const TypeInfo xive_tctx_info = {
949 .name = TYPE_XIVE_TCTX,
950 .parent = TYPE_DEVICE,
951 .instance_size = sizeof(XiveTCTX),
952 .class_init = xive_tctx_class_init,
953 };
954
Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp)
956 {
957 Object *obj;
958
959 obj = object_new(TYPE_XIVE_TCTX);
960 object_property_add_child(cpu, TYPE_XIVE_TCTX, obj);
961 object_unref(obj);
962 object_property_set_link(obj, "cpu", cpu, &error_abort);
963 object_property_set_link(obj, "presenter", OBJECT(xptr), &error_abort);
964 if (!qdev_realize(DEVICE(obj), NULL, errp)) {
965 object_unparent(obj);
966 return NULL;
967 }
968 return obj;
969 }
970
void xive_tctx_destroy(XiveTCTX *tctx)
972 {
973 Object *obj = OBJECT(tctx);
974
975 object_unparent(obj);
976 }
977
978 /*
979 * XIVE ESB helpers
980 */
981
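/*
 * The PQ state bits of an ESB entry encode a small state machine:
 *   - P set: an event notification was forwarded and awaits an EOI
 *   - Q set: a further event arrived and was coalesced while P was set
 *   - PQ=01 (OFF): the source is disabled and events are discarded
 *
 * xive_esb_trigger() and xive_esb_eoi() below implement the state
 * transitions and return whether a new notification should be sent.
 */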
uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
983 {
984 uint8_t old_pq = *pq & 0x3;
985
986 *pq &= ~0x3;
987 *pq |= value & 0x3;
988
989 return old_pq;
990 }
991
bool xive_esb_trigger(uint8_t *pq)
993 {
994 uint8_t old_pq = *pq & 0x3;
995
996 switch (old_pq) {
997 case XIVE_ESB_RESET:
998 xive_esb_set(pq, XIVE_ESB_PENDING);
999 return true;
1000 case XIVE_ESB_PENDING:
1001 case XIVE_ESB_QUEUED:
1002 xive_esb_set(pq, XIVE_ESB_QUEUED);
1003 return false;
1004 case XIVE_ESB_OFF:
1005 xive_esb_set(pq, XIVE_ESB_OFF);
1006 return false;
1007 default:
1008 g_assert_not_reached();
1009 }
1010 }
1011
bool xive_esb_eoi(uint8_t *pq)
1013 {
1014 uint8_t old_pq = *pq & 0x3;
1015
1016 switch (old_pq) {
1017 case XIVE_ESB_RESET:
1018 case XIVE_ESB_PENDING:
1019 xive_esb_set(pq, XIVE_ESB_RESET);
1020 return false;
1021 case XIVE_ESB_QUEUED:
1022 xive_esb_set(pq, XIVE_ESB_PENDING);
1023 return true;
1024 case XIVE_ESB_OFF:
1025 xive_esb_set(pq, XIVE_ESB_OFF);
1026 return false;
1027 default:
1028 g_assert_not_reached();
1029 }
1030 }
1031
1032 /*
1033 * XIVE Interrupt Source (or IVSE)
1034 */
1035
uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
1037 {
1038 assert(srcno < xsrc->nr_irqs);
1039
1040 return xsrc->status[srcno] & 0x3;
1041 }
1042
uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
1044 {
1045 assert(srcno < xsrc->nr_irqs);
1046
1047 return xive_esb_set(&xsrc->status[srcno], pq);
1048 }
1049
1050 /*
1051 * Returns whether the event notification should be forwarded.
1052 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
1054 {
1055 uint8_t old_pq = xive_source_esb_get(xsrc, srcno);
1056
1057 xive_source_set_asserted(xsrc, srcno, true);
1058
1059 switch (old_pq) {
1060 case XIVE_ESB_RESET:
1061 xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
1062 return true;
1063 default:
1064 return false;
1065 }
1066 }
1067
1068 /*
1069 * Sources can be configured with PQ offloading in which case the check
1070 * on the PQ state bits of MSIs is disabled
1071 */
static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno)
1073 {
1074 return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) &&
1075 !xive_source_irq_is_lsi(xsrc, srcno);
1076 }
1077
1078 /*
1079 * Returns whether the event notification should be forwarded.
1080 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
1082 {
1083 bool ret;
1084
1085 assert(srcno < xsrc->nr_irqs);
1086
1087 if (xive_source_esb_disabled(xsrc, srcno)) {
1088 return true;
1089 }
1090
1091 ret = xive_esb_trigger(&xsrc->status[srcno]);
1092
1093 if (xive_source_irq_is_lsi(xsrc, srcno) &&
1094 xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
1095 qemu_log_mask(LOG_GUEST_ERROR,
1096 "XIVE: queued an event on LSI IRQ %d\n", srcno);
1097 }
1098
1099 return ret;
1100 }
1101
1102 /*
1103 * Returns whether the event notification should be forwarded.
1104 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
1106 {
1107 bool ret;
1108
1109 assert(srcno < xsrc->nr_irqs);
1110
1111 if (xive_source_esb_disabled(xsrc, srcno)) {
1112 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno);
1113 return false;
1114 }
1115
1116 ret = xive_esb_eoi(&xsrc->status[srcno]);
1117
1118 /*
1119 * LSI sources do not set the Q bit but they can still be
1120 * asserted, in which case we should forward a new event
1121 * notification
1122 */
1123 if (xive_source_irq_is_lsi(xsrc, srcno) &&
1124 xive_source_is_asserted(xsrc, srcno)) {
1125 ret = xive_source_lsi_trigger(xsrc, srcno);
1126 }
1127
1128 return ret;
1129 }
1130
1131 /*
1132 * Forward the source event notification to the Router
1133 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
1135 {
1136 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
1137 bool pq_checked = !xive_source_esb_disabled(xsrc, srcno);
1138
1139 if (xnc->notify) {
1140 xnc->notify(xsrc->xive, srcno, pq_checked);
1141 }
1142 }
1143
1144 /*
 * In a two-page ESB MMIO setting, the even page is the trigger page
 * and the odd page is for management
1147 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
1149 {
1150 return !((addr >> shift) & 1);
1151 }
1152
static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
1154 {
1155 return xive_source_esb_has_2page(xsrc) &&
1156 addr_is_even(addr, xsrc->esb_shift - 1);
1157 }
1158
1159 /*
1160 * ESB MMIO loads
1161 * Trigger page Management/EOI page
1162 *
1163 * ESB MMIO setting 2 pages 1 or 2 pages
1164 *
1165 * 0x000 .. 0x3FF -1 EOI and return 0|1
1166 * 0x400 .. 0x7FF -1 EOI and return 0|1
1167 * 0x800 .. 0xBFF -1 return PQ
1168 * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00
1169 * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF -1 return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF -1 return PQ and atomically PQ=11
1172 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
1174 {
1175 XiveSource *xsrc = XIVE_SOURCE(opaque);
1176 uint32_t offset = addr & 0xFFF;
1177 uint32_t srcno = addr >> xsrc->esb_shift;
1178 uint64_t ret = -1;
1179
/* In a two-page ESB MMIO setting, the trigger page should not be read */
1181 if (xive_source_is_trigger_page(xsrc, addr)) {
1182 qemu_log_mask(LOG_GUEST_ERROR,
1183 "XIVE: invalid load on IRQ %d trigger page at "
1184 "0x%"HWADDR_PRIx"\n", srcno, addr);
1185 return -1;
1186 }
1187
1188 switch (offset) {
1189 case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
1190 ret = xive_source_esb_eoi(xsrc, srcno);
1191
1192 /* Forward the source event notification for routing */
1193 if (ret) {
1194 xive_source_notify(xsrc, srcno);
1195 }
1196 break;
1197
1198 case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
1199 ret = xive_source_esb_get(xsrc, srcno);
1200 break;
1201
1202 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1203 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1204 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1205 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1206 ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
1207 break;
1208 default:
1209 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
1210 offset);
1211 }
1212
1213 trace_xive_source_esb_read(addr, srcno, ret);
1214
1215 return ret;
1216 }
1217
1218 /*
1219 * ESB MMIO stores
1220 * Trigger page Management/EOI page
1221 *
1222 * ESB MMIO setting 2 pages 1 or 2 pages
1223 *
1224 * 0x000 .. 0x3FF Trigger Trigger
1225 * 0x400 .. 0x7FF Trigger EOI
1226 * 0x800 .. 0xBFF Trigger undefined
1227 * 0xC00 .. 0xCFF Trigger PQ=00
1228 * 0xD00 .. 0xDFF Trigger PQ=01
 * 0xE00 .. 0xEFF Trigger PQ=10
 * 0xF00 .. 0xFFF Trigger PQ=11
1231 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
1233 uint64_t value, unsigned size)
1234 {
1235 XiveSource *xsrc = XIVE_SOURCE(opaque);
1236 uint32_t offset = addr & 0xFFF;
1237 uint32_t srcno = addr >> xsrc->esb_shift;
1238 bool notify = false;
1239
1240 trace_xive_source_esb_write(addr, srcno, value);
1241
/* In a two-page ESB MMIO setting, the trigger page only triggers */
1243 if (xive_source_is_trigger_page(xsrc, addr)) {
1244 notify = xive_source_esb_trigger(xsrc, srcno);
1245 goto out;
1246 }
1247
1248 switch (offset) {
1249 case 0 ... 0x3FF:
1250 notify = xive_source_esb_trigger(xsrc, srcno);
1251 break;
1252
1253 case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
1254 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
1255 qemu_log_mask(LOG_GUEST_ERROR,
1256 "XIVE: invalid Store EOI for IRQ %d\n", srcno);
1257 return;
1258 }
1259
1260 notify = xive_source_esb_eoi(xsrc, srcno);
1261 break;
1262
1263 /*
1264 * This is an internal offset used to inject triggers when the PQ
 * state bits are not controlled locally, such as for LSIs under
 * ABT mode.
1267 */
1268 case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
1269 notify = true;
1270 break;
1271
1272 case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
1273 case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
1274 case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
1275 case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
1276 xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
1277 break;
1278
1279 default:
1280 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
1281 offset);
1282 return;
1283 }
1284
1285 out:
1286 /* Forward the source event notification for routing */
1287 if (notify) {
1288 xive_source_notify(xsrc, srcno);
1289 }
1290 }
1291
1292 static const MemoryRegionOps xive_source_esb_ops = {
1293 .read = xive_source_esb_read,
1294 .write = xive_source_esb_write,
1295 .endianness = DEVICE_BIG_ENDIAN,
1296 .valid = {
1297 .min_access_size = 1,
1298 .max_access_size = 8,
1299 },
1300 .impl = {
1301 .min_access_size = 1,
1302 .max_access_size = 8,
1303 },
1304 };
1305
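/*
 * qemu_irq handler for the source: LSIs follow the input level while
 * MSIs only trigger on a rising edge.
 */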
void xive_source_set_irq(void *opaque, int srcno, int val)
1307 {
1308 XiveSource *xsrc = XIVE_SOURCE(opaque);
1309 bool notify = false;
1310
1311 if (xive_source_irq_is_lsi(xsrc, srcno)) {
1312 if (val) {
1313 notify = xive_source_lsi_trigger(xsrc, srcno);
1314 } else {
1315 xive_source_set_asserted(xsrc, srcno, false);
1316 }
1317 } else {
1318 if (val) {
1319 notify = xive_source_esb_trigger(xsrc, srcno);
1320 }
1321 }
1322
1323 /* Forward the source event notification for routing */
1324 if (notify) {
1325 xive_source_notify(xsrc, srcno);
1326 }
1327 }
1328
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, GString *buf)
1330 {
1331 for (unsigned i = 0; i < xsrc->nr_irqs; i++) {
1332 uint8_t pq = xive_source_esb_get(xsrc, i);
1333
1334 if (pq == XIVE_ESB_OFF) {
1335 continue;
1336 }
1337
1338 g_string_append_printf(buf, " %08x %s %c%c%c\n", i + offset,
1339 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
1340 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1341 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1342 xive_source_is_asserted(xsrc, i) ? 'A' : ' ');
1343 }
1344 }
1345
static void xive_source_reset(void *dev)
1347 {
1348 XiveSource *xsrc = XIVE_SOURCE(dev);
1349
1350 /* Do not clear the LSI bitmap */
1351
1352 memset(xsrc->status, xsrc->reset_pq, xsrc->nr_irqs);
1353 }
1354
static void xive_source_realize(DeviceState *dev, Error **errp)
1356 {
1357 XiveSource *xsrc = XIVE_SOURCE(dev);
1358 uint64_t esb_len = xive_source_esb_len(xsrc);
1359
1360 assert(xsrc->xive);
1361
1362 if (!xsrc->nr_irqs) {
error_setg(errp, "Number of interrupts needs to be greater than 0");
1364 return;
1365 }
1366
1367 if (xsrc->esb_shift != XIVE_ESB_4K &&
1368 xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
1369 xsrc->esb_shift != XIVE_ESB_64K &&
1370 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
1371 error_setg(errp, "Invalid ESB shift setting");
1372 return;
1373 }
1374
1375 xsrc->status = g_malloc0(xsrc->nr_irqs);
1376 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);
1377
1378 memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len);
1379 memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc),
1380 &xive_source_esb_ops, xsrc, "xive.esb-emulated",
1381 esb_len);
1382 memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated);
1383
1384 qemu_register_reset(xive_source_reset, dev);
1385 }
1386
1387 static const VMStateDescription vmstate_xive_source = {
1388 .name = TYPE_XIVE_SOURCE,
1389 .version_id = 1,
1390 .minimum_version_id = 1,
1391 .fields = (const VMStateField[]) {
1392 VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
1393 VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
1394 VMSTATE_END_OF_LIST()
1395 },
1396 };
1397
1398 /*
1399 * The default XIVE interrupt source setting for the ESB MMIOs is two
1400 * 64k pages without Store EOI, to be in sync with KVM.
1401 */
1402 static const Property xive_source_properties[] = {
1403 DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
1404 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
1405 DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
1406 /*
1407 * By default, PQs are initialized to 0b01 (Q=1) which corresponds
1408 * to "ints off"
1409 */
1410 DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF),
1411 DEFINE_PROP_LINK("xive", XiveSource, xive, TYPE_XIVE_NOTIFIER,
1412 XiveNotifier *),
1413 };
1414
static void xive_source_class_init(ObjectClass *klass, const void *data)
1416 {
1417 DeviceClass *dc = DEVICE_CLASS(klass);
1418
1419 dc->desc = "XIVE Interrupt Source";
1420 device_class_set_props(dc, xive_source_properties);
1421 dc->realize = xive_source_realize;
1422 dc->vmsd = &vmstate_xive_source;
1423 /*
1424 * Reason: part of XIVE interrupt controller, needs to be wired up,
1425 * e.g. by spapr_xive_instance_init().
1426 */
1427 dc->user_creatable = false;
1428 }
1429
1430 static const TypeInfo xive_source_info = {
1431 .name = TYPE_XIVE_SOURCE,
1432 .parent = TYPE_DEVICE,
1433 .instance_size = sizeof(XiveSource),
1434 .class_init = xive_source_class_init,
1435 };
1436
1437 /*
1438 * XiveEND helpers
1439 */
1440
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, GString *buf)
1442 {
1443 uint64_t qaddr_base = xive_end_qaddr(end);
1444 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1445 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1446 uint32_t qentries = 1 << (qsize + 10);
1447 int i;
1448
1449 /*
1450 * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
1451 */
1452 g_string_append_printf(buf, " [ ");
1453 qindex = (qindex - (width - 1)) & (qentries - 1);
1454 for (i = 0; i < width; i++) {
1455 uint64_t qaddr = qaddr_base + (qindex << 2);
1456 uint32_t qdata = -1;
1457
1458 if (dma_memory_read(&address_space_memory, qaddr,
1459 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
1460 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
1461 HWADDR_PRIx "\n", qaddr);
1462 return;
1463 }
1464 g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
1465 be32_to_cpu(qdata));
1466 qindex = (qindex + 1) & (qentries - 1);
1467 }
1468 g_string_append_c(buf, ']');
1469 }
1470
void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf)
1472 {
1473 uint64_t qaddr_base = xive_end_qaddr(end);
1474 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1475 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1476 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1477 uint32_t qentries = 1 << (qsize + 10);
1478
1479 uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
1480 uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
1481 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
1482 uint8_t pq;
1483
1484 if (!xive_end_is_valid(end)) {
1485 return;
1486 }
1487
1488 pq = xive_get_field32(END_W1_ESn, end->w1);
1489
1490 g_string_append_printf(buf,
1491 " %08x %c%c %c%c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
1492 end_idx,
1493 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1494 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1495 xive_end_is_valid(end) ? 'v' : '-',
1496 xive_end_is_enqueue(end) ? 'q' : '-',
1497 xive_end_is_notify(end) ? 'n' : '-',
1498 xive_end_is_backlog(end) ? 'b' : '-',
1499 xive_end_is_escalate(end) ? 'e' : '-',
1500 xive_end_is_uncond_escalation(end) ? 'u' : '-',
1501 xive_end_is_silent_escalation(end) ? 's' : '-',
1502 xive_end_is_firmware(end) ? 'f' : '-',
1503 priority, nvt_blk, nvt_idx);
1504
1505 if (qaddr_base) {
1506 g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
1507 qaddr_base, qindex, qentries, qgen);
1508 xive_end_queue_pic_print_info(end, 6, buf);
1509 }
1510 g_string_append_c(buf, '\n');
1511 }
1512
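/*
 * Add an event to the END Event Queue: a ring of 4-byte entries in
 * guest memory holding 2^(qsize + 10) entries. The top bit of each
 * entry carries the generation bit, which is flipped each time the
 * write index wraps so that the consumer can spot new entries.
 */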
static void xive_end_enqueue(XiveEND *end, uint32_t data)
1514 {
1515 uint64_t qaddr_base = xive_end_qaddr(end);
1516 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
1517 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1518 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
1519
1520 uint64_t qaddr = qaddr_base + (qindex << 2);
1521 uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
1522 uint32_t qentries = 1 << (qsize + 10);
1523
1524 if (dma_memory_write(&address_space_memory, qaddr,
1525 &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
1526 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
1527 HWADDR_PRIx "\n", qaddr);
1528 return;
1529 }
1530
1531 qindex = (qindex + 1) & (qentries - 1);
1532 if (qindex == 0) {
1533 qgen ^= 1;
1534 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
1535 }
1536 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
1537 }
1538
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, GString *buf)
1540 {
1541 XiveEAS *eas = (XiveEAS *) &end->w4;
1542 uint8_t pq;
1543
1544 if (!xive_end_is_escalate(end)) {
1545 return;
1546 }
1547
1548 pq = xive_get_field32(END_W1_ESe, end->w1);
1549
1550 g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
1551 end_idx,
1552 pq & XIVE_ESB_VAL_P ? 'P' : '-',
1553 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
1554 xive_eas_is_valid(eas) ? 'V' : ' ',
1555 xive_eas_is_masked(eas) ? 'M' : ' ',
1556 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
1557 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
1558 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
1559 }
1560
1561 /*
1562 * XIVE Router (aka. Virtualization Controller or IVRE)
1563 */
1564
int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1566 XiveEAS *eas)
1567 {
1568 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1569
1570 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
1571 }
1572
1573 static
int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1575 uint8_t *pq)
1576 {
1577 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1578
1579 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
1580 }
1581
1582 static
int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
1584 uint8_t *pq)
1585 {
1586 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1587
1588 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
1589 }
1590
int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1592 XiveEND *end)
1593 {
1594 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1595
1596 return xrc->get_end(xrtr, end_blk, end_idx, end);
1597 }
1598
int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
1600 XiveEND *end, uint8_t word_number)
1601 {
1602 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1603
1604 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
1605 }
1606
int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1608 XiveNVT *nvt)
1609 {
1610 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1611
1612 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
1613 }
1614
int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
1616 XiveNVT *nvt, uint8_t word_number)
1617 {
1618 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1619
1620 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
1621 }
1622
static int xive_router_get_block_id(XiveRouter *xrtr)
1624 {
1625 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1626
1627 return xrc->get_block_id(xrtr);
1628 }
1629
static void xive_router_realize(DeviceState *dev, Error **errp)
1631 {
1632 XiveRouter *xrtr = XIVE_ROUTER(dev);
1633
1634 assert(xrtr->xfb);
1635 }
1636
static void xive_router_end_notify_handler(XiveRouter *xrtr, XiveEAS *eas)
1638 {
1639 XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
1640
1641 return xrc->end_notify(xrtr, eas);
1642 }
1643
1644 /*
1645 * Encode the HW CAM line in the block group mode format :
1646 *
1647 * chip << 19 | 0000000 0 0001 thread (7Bit)
1648 */
static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
1650 {
1651 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
1652 uint32_t pir = env->spr_cb[SPR_PIR].default_value;
1653 uint8_t blk = xive_router_get_block_id(XIVE_ROUTER(xptr));
1654
1655 return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
1656 }
1657
uint32_t xive_get_vpgroup_size(uint32_t nvp_index)
1659 {
1660 /*
1661 * Group size is a power of 2. The position of the first 0
1662 * (starting with the least significant bits) in the NVP index
1663 * gives the size of the group.
1664 */
1665 int first_zero = cto32(nvp_index);
1666 if (first_zero >= 31) {
1667 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x",
1668 nvp_index);
1669 return 0;
1670 }
1671
1672 return 1U << (first_zero + 1);
1673 }
1674
static uint8_t xive_get_group_level(bool crowd, bool ignore,
1676 uint32_t nvp_blk, uint32_t nvp_index)
1677 {
1678 int first_zero;
1679 uint8_t level;
1680
1681 if (!ignore) {
1682 g_assert(!crowd);
1683 return 0;
1684 }
1685
1686 first_zero = cto32(nvp_index);
1687 if (first_zero >= 31) {
1688 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid group index 0x%08x",
1689 nvp_index);
1690 return 0;
1691 }
1692
1693 level = (first_zero + 1) & 0b1111;
1694 if (crowd) {
1695 uint32_t blk;
1696
1697 /* crowd level is bit position of first 0 from the right in nvp_blk */
1698 first_zero = cto32(nvp_blk);
1699 if (first_zero >= 31) {
1700 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd block 0x%08x",
1701 nvp_blk);
1702 return 0;
1703 }
1704 blk = first_zero + 1;
1705
1706 /*
1707 * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported.
1708 * HW will encode level 4 as the value 3. See xive2_pgofnext().
1709 */
1710 switch (blk) {
1711 case 1:
1712 case 2:
1713 break;
1714 case 4:
1715 blk = 3;
1716 break;
1717 default:
1718 g_assert_not_reached();
1719 }
1720
1721 /* Crowd level bits reside in upper 2 bits of the 6 bit group level */
1722 level |= blk << 4;
1723 }
1724 return level;
1725 }
1726
1727 /*
1728 * The thread context register words are in big-endian format.
1729 */
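/*
 * Compare the NVT identifier (and logical server for F=1) against the
 * CAM values of the four rings. Returns the TM_QW* offset of the
 * matching ring or -1 if the thread context does not match.
 */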
int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
1731 uint8_t format,
1732 uint8_t nvt_blk, uint32_t nvt_idx,
1733 bool cam_ignore, uint32_t logic_serv)
1734 {
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 */
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool crowd, bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv, bool *precluded)
{
    XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
    uint8_t group_level;
    int count;

    /*
     * Ask the machine to scan the interrupt controllers for a match.
     *
     * For VP-specific notification, we expect at most one match and
     * one call to the presenters is all we need (abbreviated notify
     * sequence documented by the architecture).
     *
     * For VP-group notification, match_nvt() is the equivalent of the
     * "histogram" and "poll" commands sent to the power bus to the
     * presenters. 'count' could be more than one, but we always
     * select the first match for now. 'precluded' tells if (at least)
     * one thread matches but can't take the interrupt now because
     * it's running at a more favored priority. We return the
     * information to the router so that it can take appropriate
     * actions (backlog, escalation, broadcast, etc...)
     *
     * If we were to implement a better way of dispatching the
     * interrupt in case of multiple matches (instead of the first
     * match), we would need a heuristic to elect a thread (for
     * example, the hardware keeps track of an 'age' in the TIMA) and
     * a new command to the presenters (the equivalent of the "assign"
     * power bus command in the documented full notify sequence).
     */
    count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
                           priority, logic_serv, &match);
    if (count < 0) {
        return false;
    }

    /* handle CPU exception delivery */
    if (count) {
        group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx);
        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
        xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
    } else {
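        /*
         * No thread took the interrupt: report whether one matched but
         * was running at a more favored priority (precluded).
         */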
        *precluded = match.precluded;
    }

    return !!count;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
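    /*
     * Trigger the selected ES (PQ) bits of the END and only forward the
     * notification if the resulting PQ state allows it.
     */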
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus, but the END trigger
 * message has the same parameters as the function below.
 */
void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found, precluded;

    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        trace_xive_router_end_notify(end_blk, end_idx, end_data);
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification:
     *
     * F=0 : single or multiple NVT notification
     * F=1 : User level Event-Based Branch (EBB) notification, no
     *       priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
                                  false /* crowd */,
                                  xive_get_field32(END_W7_F0_IGNORE, end.w7),
                                  priority,
                                  xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
                                  &precluded);
    /* we don't support VP-group notification on P9, so precluded is not used */
    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread:
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server: forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /*
         * Record the IPB in the associated NVT structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) |
              xive_priority_to_ipb(priority);
        nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    trace_xive_router_end_escalate(end_blk, end_idx,
           (uint8_t) xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
           (uint32_t) xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
           (uint32_t) xive_get_field32(END_W5_ESC_END_DATA, end.w5));
    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify_handler(xrtr, (XiveEAS *) &end.w4);
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
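        /*
         * The PQ state bits were not resolved by the source: run the
         * ESB trigger sequence here on the cached PQ state.
         */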
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify_handler(xrtr, &eas);
}

static const Property xive_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};

static void xive_router_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    device_class_set_props(dc, xive_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive_router_realize;
    xnc->notify = xive_router_notify;

    /* By default, the router handles END triggers locally */
    xrc->end_notify = xive_router_end_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .instance_size = sizeof(XiveRouter),
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, GString *buf)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive_router_get_block_id(xsrc->xrtr);
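    /* each END owns an even/odd pair of ESB pages, hence the extra shift */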
    end_idx = addr >> (xsrc->esb_shift + 1);

    trace_xive_end_source_read(end_blk, end_idx, addr);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

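    /* the even ESB page manages the ESn bits, the odd page the ESe bits */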
    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
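        /* the SET_PQ page offset encodes the new PQ value in bits 9:8 */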
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static const Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", XiveENDSource, xrtr, TYPE_XIVE_ROUTER,
                     XiveRouter *),
};

static void xive_end_source_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive_end_source_properties);
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

/*
 * XIVE Presenter
 */
static const TypeInfo xive_presenter_info = {
    .name = TYPE_XIVE_PRESENTER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XivePresenterClass),
};

/*
 * XIVE Fabric
 */
static const TypeInfo xive_fabric_info = {
    .name = TYPE_XIVE_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveFabricClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_fabric_info);
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_presenter_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)
