Lines Matching +full:default +full:- +full:trigger
4 * Copyright (c) 2017-2018, IBM Corporation.
6 * SPDX-License-Identifier: GPL-2.0-or-later
17 #include "hw/qdev-properties.h"
35 return tctx->os_output; in xive_tctx_output()
38 return tctx->hv_output; in xive_tctx_output()
39 default: in xive_tctx_output()
46 uint8_t *regs = &tctx->regs[ring]; in xive_tctx_accept()
62 alt_regs = &tctx->regs[alt_ring]; in xive_tctx_accept()
79 trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, in xive_tctx_accept()
91 uint8_t *alt_regs = &tctx->regs[alt_ring]; in xive_tctx_notify()
92 uint8_t *regs = &tctx->regs[ring]; in xive_tctx_notify()
105 default: in xive_tctx_notify()
108 trace_xive_tctx_notify(tctx->cs->cpu_index, ring, in xive_tctx_notify()
120 * should be raised again when re-pushing the lower privilege context. in xive_tctx_reset_signal()
127 uint8_t *regs = &tctx->regs[ring]; in xive_tctx_set_cppr()
131 trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, in xive_tctx_set_cppr()
139 tctx->regs[ring + TM_CPPR] = cppr; in xive_tctx_set_cppr()
150 uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; in xive_tctx_set_cppr()
179 uint8_t *alt_regs = &tctx->regs[alt_ring]; in xive_tctx_pipr_update()
180 uint8_t *regs = &tctx->regs[ring]; in xive_tctx_pipr_update()
183 /* VP-specific */ in xive_tctx_pipr_update()
187 /* VP-group */ in xive_tctx_pipr_update()
212 uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); in xive_tm_pull_pool_ctx()
216 memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); in xive_tm_pull_pool_ctx()
223 uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; in xive_tm_pull_phys_ctx()
227 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8; in xive_tm_pull_phys_ctx()
234 tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; in xive_tm_vt_push()
240 return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; in xive_tm_vt_poll()
250 * 0x0 - no access
251 * 0x1 - write only
252 * 0x2 - read only
253 * 0x3 - read/write
257 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
258 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
259 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
260 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
264 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
265 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
266 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
267 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
271 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
272 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
273 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
274 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
278 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
279 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */
280 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
281 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
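
The 0x0-0x3 legend above encodes each TIMA byte's access right in two bits. A minimal sketch of how one table entry decodes, assuming only that bit 0 grants write access and bit 1 grants read access; the helper names are illustrative, not QEMU functions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative decode of one TIMA access-table entry:
     * bit 0 (0x1) = writable, bit 1 (0x2) = readable, 0x3 = read/write. */
    static bool tima_byte_is_readable(uint8_t perm)
    {
        return perm & 0x2;
    }

    static bool tima_byte_is_writable(uint8_t perm)
    {
        return perm & 0x1;
    }
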
308 mask |= (uint64_t) 0xff << (8 * (size - i - 1)); in xive_tm_mask()
338 uint8_t byte_mask = (mask >> (8 * (size - i - 1))); in xive_tm_raw_write()
340 tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & in xive_tm_raw_write()
361 return -1; in xive_tm_raw_read()
367 ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); in xive_tm_raw_read()
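
The shift expressions above gather and scatter a TIMA access one byte at a time, most significant byte first. A standalone sketch of the same big-endian gather loop; regs_read_be is a generic illustration, not the QEMU xive_tm_raw_read():

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative big-endian gather: combine 'size' bytes (1, 2, 4 or 8)
     * starting at 'regs' into a single value, mirroring the shift above. */
    static uint64_t regs_read_be(const uint8_t *regs, size_t size)
    {
        uint64_t ret = 0;

        for (size_t i = 0; i < size; i++) {
            ret |= (uint64_t)regs[i] << (8 * (size - i - 1));
        }
        return ret;
    }
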
395 uint8_t *regs = &tctx->regs[ring]; in xive_tctx_set_lgs()
433 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); in xive_tctx_get_os_cam()
442 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); in xive_tctx_set_os_cam()
492 uint8_t *regs = &tctx->regs[TM_QW1_OS]; in xive_tctx_need_resend()
508 * Updating the OS CAM line can trigger a resend of interrupt
534 return xpc->get_config(xptr); in xive_presenter_get_config()
660 if (xto->page_offset >= page_offset && in xive_tm_find_op()
661 xto->op_offset == op_offset && in xive_tm_find_op()
662 xto->size == size && in xive_tm_find_op()
663 ((write && xto->write_handler) || (!write && xto->read_handler))) { in xive_tm_find_op()
678 trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value); in xive_tctx_tm_write()
681 * TODO: check V bit in Q[0-3]W2 in xive_tctx_tm_write()
688 xto = xive_tm_find_op(tctx->xptr, offset, size, true); in xive_tctx_tm_write()
693 xto->write_handler(xptr, tctx, offset, value, size); in xive_tctx_tm_write()
701 xto = xive_tm_find_op(tctx->xptr, offset, size, true); in xive_tctx_tm_write()
703 xto->write_handler(xptr, tctx, offset, value, size); in xive_tctx_tm_write()
720 * TODO: check V bit in Q[0-3]W2 in xive_tctx_tm_read()
727 xto = xive_tm_find_op(tctx->xptr, offset, size, false); in xive_tctx_tm_read()
731 return -1; in xive_tctx_tm_read()
733 ret = xto->read_handler(xptr, tctx, offset, size); in xive_tctx_tm_read()
740 xto = xive_tm_find_op(tctx->xptr, offset, size, false); in xive_tctx_tm_read()
742 ret = xto->read_handler(xptr, tctx, offset, size); in xive_tctx_tm_read()
751 trace_xive_tctx_tm_read(tctx->cs->cpu_index, offset, size, ret); in xive_tctx_tm_read()
778 xpc->in_kernel ? xpc->in_kernel(xptr) : false; \
793 cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; in xive_tctx_pic_print_info()
795 if (xive_in_kernel(tctx->xptr)) { in xive_tctx_pic_print_info()
805 if (xive_presenter_get_config(tctx->xptr) & XIVE_PRESENTER_GEN1_TIMA_OS) { in xive_tctx_pic_print_info()
811 "QW NSR CPPR IPB LSMFB - LGS T PIPR" in xive_tctx_pic_print_info()
816 char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); in xive_tctx_pic_print_info()
825 memset(tctx->regs, 0, sizeof(tctx->regs)); in xive_tctx_reset()
828 tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; in xive_tctx_reset()
829 tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; in xive_tctx_reset()
830 tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; in xive_tctx_reset()
831 if (!(xive_presenter_get_config(tctx->xptr) & in xive_tctx_reset()
833 tctx->regs[TM_QW1_OS + TM_OGEN] = 2; in xive_tctx_reset()
840 tctx->regs[TM_QW1_OS + TM_PIPR] = in xive_tctx_reset()
841 xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); in xive_tctx_reset()
842 tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = in xive_tctx_reset()
843 xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); in xive_tctx_reset()
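
The reset fragments above recompute the PIPR from the IPB. A rough sketch of that conversion, assuming the usual XIVE convention that IPB bit (7 - p) marks priority p as pending and that 0xFF means nothing pending; ipb_to_pipr is an illustration, not the QEMU helper:

    #include <stdint.h>

    /* Illustrative IPB -> PIPR conversion: return the most favoured
     * (lowest numbered) pending priority, or 0xFF for an empty bitmap. */
    static uint8_t ipb_to_pipr(uint8_t ipb)
    {
        for (uint8_t prio = 0; prio < 8; prio++) {
            if (ipb & (0x80 >> prio)) {
                return prio;
            }
        }
        return 0xFF;
    }
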
852 assert(tctx->cs); in xive_tctx_realize()
853 assert(tctx->xptr); in xive_tctx_realize()
855 cpu = POWERPC_CPU(tctx->cs); in xive_tctx_realize()
856 env = &cpu->env; in xive_tctx_realize()
859 tctx->hv_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_HINT); in xive_tctx_realize()
860 tctx->os_output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT); in xive_tctx_realize()
863 default: in xive_tctx_realize()
870 if (xive_in_kernel(tctx->xptr)) { in xive_tctx_realize()
883 if (xive_in_kernel(tctx->xptr)) { in vmstate_xive_tctx_pre_save()
900 if (xive_in_kernel(tctx->xptr)) { in vmstate_xive_tctx_post_load()
937 dc->desc = "XIVE Interrupt Thread Context"; in xive_tctx_class_init()
938 dc->realize = xive_tctx_realize; in xive_tctx_class_init()
939 dc->vmsd = &vmstate_xive_tctx; in xive_tctx_class_init()
945 dc->user_creatable = false; in xive_tctx_class_init()
1007 default: in xive_esb_trigger()
1027 default: in xive_esb_eoi()
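
The default cases above close the ESB PQ state machines for trigger and EOI. A compact sketch of those transitions on the raw two-bit PQ values (00 reset, 01 off, 10 pending, 11 queued); simplified stand-ins, not the exact QEMU helpers:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative PQ transition on an event trigger: returns true when
     * the event should be forwarded to the router. */
    static bool esb_trigger(uint8_t *pq)
    {
        switch (*pq & 0x3) {
        case 0x0: *pq = 0x2; return true;   /* idle -> pending, route it   */
        case 0x2: *pq = 0x3; return false;  /* pending -> queued, coalesce */
        case 0x3: return false;             /* already queued              */
        default:  return false;             /* 0x1: source off, dropped    */
        }
    }

    /* Illustrative PQ transition on EOI: returns true when a queued event
     * must be re-triggered after the EOI. */
    static bool esb_eoi(uint8_t *pq)
    {
        switch (*pq & 0x3) {
        case 0x2: *pq = 0x0; return false;  /* pending -> idle             */
        case 0x3: *pq = 0x2; return true;   /* queued -> pending, re-fire  */
        default:  return false;             /* idle or off: nothing to do  */
        }
    }
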
1038 assert(srcno < xsrc->nr_irqs); in xive_source_esb_get()
1040 return xsrc->status[srcno] & 0x3; in xive_source_esb_get()
1045 assert(srcno < xsrc->nr_irqs); in xive_source_esb_set()
1047 return xive_esb_set(&xsrc->status[srcno], pq); in xive_source_esb_set()
1063 default: in xive_source_lsi_trigger()
1074 return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) && in xive_source_esb_disabled()
1085 assert(srcno < xsrc->nr_irqs); in xive_source_esb_trigger()
1091 ret = xive_esb_trigger(&xsrc->status[srcno]); in xive_source_esb_trigger()
1109 assert(srcno < xsrc->nr_irqs); in xive_source_esb_eoi()
1116 ret = xive_esb_eoi(&xsrc->status[srcno]); in xive_source_esb_eoi()
1136 XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); in xive_source_notify()
1139 if (xnc->notify) { in xive_source_notify()
1140 xnc->notify(xsrc->xive, srcno, pq_checked); in xive_source_notify()
1145 * In a two-page ESB MMIO setting, the even page is the trigger page, odd
1156 addr_is_even(addr, xsrc->esb_shift - 1); in xive_source_is_trigger_page()
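
With two pages per source, esb_shift spans the trigger/management pair, so the source number and the page role fall out of simple shifts. A sketch under that assumption; the helpers are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative two-page ESB address decode, assuming esb_shift covers
     * the trigger + management page pair of one source. */
    static uint32_t esb_addr_to_srcno(uint64_t addr, unsigned esb_shift)
    {
        return addr >> esb_shift;
    }

    static bool esb_addr_is_trigger_page(uint64_t addr, unsigned esb_shift)
    {
        /* the even (lower) page of the pair is the trigger page */
        return !(addr & (1ull << (esb_shift - 1)));
    }
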
1161 * Trigger page Management/EOI page
1165 * 0x000 .. 0x3FF -1 EOI and return 0|1
1166 * 0x400 .. 0x7FF -1 EOI and return 0|1
1167 * 0x800 .. 0xBFF -1 return PQ
1168 * 0xC00 .. 0xCFF -1 return PQ and atomically PQ=00
1169 * 0xD00 .. 0xDFF -1 return PQ and atomically PQ=01
1170 * 0xE00 .. 0xEFF -1 return PQ and atomically PQ=10
1171 * 0xF00 .. 0xFFF -1 return PQ and atomically PQ=11
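
The load table above reads naturally as a switch on the offset within the management page. A sketch of that decode; the return markers and helper name are illustrative, not the QEMU special-command constants:

    #include <stdint.h>

    #define ESB_LOAD_EOI  (-2)  /* 0x000-0x7FF: EOI and return 0 or 1   */
    #define ESB_LOAD_GET  (-1)  /* 0x800-0xBFF: return PQ, no update    */

    /* Illustrative decode of a management-page load offset: returns the
     * new PQ value to set atomically (0..3), or one of the markers above. */
    static int esb_mgmt_load_decode(uint64_t offset)
    {
        switch (offset & 0xF00) {
        case 0x000: case 0x100: case 0x200: case 0x300:
        case 0x400: case 0x500: case 0x600: case 0x700:
            return ESB_LOAD_EOI;
        case 0x800: case 0x900: case 0xA00: case 0xB00:
            return ESB_LOAD_GET;
        case 0xC00: return 0x0;  /* return PQ and set PQ=00      */
        case 0xD00: return 0x1;  /* return PQ and set PQ=01      */
        case 0xE00: return 0x2;  /* return PQ and set PQ=10      */
        default:    return 0x3;  /* 0xF00: return PQ, set PQ=11  */
        }
    }
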
1177 uint32_t srcno = addr >> xsrc->esb_shift; in xive_source_esb_read()
1178 uint64_t ret = -1; in xive_source_esb_read()
1180 /* In a two-page ESB MMIO setting, the trigger page should not be read */ in xive_source_esb_read()
1183 "XIVE: invalid load on IRQ %d trigger page at " in xive_source_esb_read()
1185 return -1; in xive_source_esb_read()
1208 default: in xive_source_esb_read()
1220 * Trigger page Management/EOI page
1224 * 0x000 .. 0x3FF Trigger Trigger
1225 * 0x400 .. 0x7FF Trigger EOI
1226 * 0x800 .. 0xBFF Trigger undefined
1227 * 0xC00 .. 0xCFF Trigger PQ=00
1228 * 0xD00 .. 0xDFF Trigger PQ=01
1229 * 0xE00 .. 0xEFF Trigger PQ=10
1230 * 0xF00 .. 0xFFF Trigger PQ=11
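
Stores follow the same offset convention, with the 0x400 quarter acting as Store EOI and the 0xC00-0xFFF quarters setting PQ directly. A short illustrative classification, not the QEMU switch:

    #include <stdint.h>

    enum mgmt_store { MGMT_TRIGGER, MGMT_STORE_EOI, MGMT_SET_PQ, MGMT_UNDEF };

    /* Illustrative classification of a management-page store offset,
     * following the table above. */
    static enum mgmt_store esb_mgmt_store_class(uint64_t offset)
    {
        switch (offset & 0xC00) {
        case 0x000: return MGMT_TRIGGER;
        case 0x400: return MGMT_STORE_EOI;  /* only if Store EOI enabled  */
        case 0x800: return MGMT_UNDEF;
        default:    return MGMT_SET_PQ;     /* PQ = (offset >> 8) & 0x3   */
        }
    }
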
1237 uint32_t srcno = addr >> xsrc->esb_shift; in xive_source_esb_write()
1242 /* In a two-page ESB MMIO setting, the trigger page only triggers */ in xive_source_esb_write()
1254 if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { in xive_source_esb_write()
1279 default: in xive_source_esb_write()
1331 for (unsigned i = 0; i < xsrc->nr_irqs; i++) { in xive_source_pic_print_info()
1340 pq & XIVE_ESB_VAL_P ? 'P' : '-', in xive_source_pic_print_info()
1341 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', in xive_source_pic_print_info()
1352 memset(xsrc->status, xsrc->reset_pq, xsrc->nr_irqs); in xive_source_reset()
1360 assert(xsrc->xive); in xive_source_realize()
1362 if (!xsrc->nr_irqs) { in xive_source_realize()
1367 if (xsrc->esb_shift != XIVE_ESB_4K && in xive_source_realize()
1368 xsrc->esb_shift != XIVE_ESB_4K_2PAGE && in xive_source_realize()
1369 xsrc->esb_shift != XIVE_ESB_64K && in xive_source_realize()
1370 xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { in xive_source_realize()
1375 xsrc->status = g_malloc0(xsrc->nr_irqs); in xive_source_realize()
1376 xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); in xive_source_realize()
1378 memory_region_init(&xsrc->esb_mmio, OBJECT(xsrc), "xive.esb", esb_len); in xive_source_realize()
1379 memory_region_init_io(&xsrc->esb_mmio_emulated, OBJECT(xsrc), in xive_source_realize()
1380 &xive_source_esb_ops, xsrc, "xive.esb-emulated", in xive_source_realize()
1382 memory_region_add_subregion(&xsrc->esb_mmio, 0, &xsrc->esb_mmio_emulated); in xive_source_realize()
1399 * The default XIVE interrupt source setting for the ESB MMIOs is two
1404 DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
1407 * By default, PQs are initialized to 0b01 (Q=1) which corresponds
1410 DEFINE_PROP_UINT8("reset-pq", XiveSource, reset_pq, XIVE_ESB_OFF),
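
The reset-pq default of 0b01 leaves every source masked until software enables it. For reference, a sketch of the two-bit PQ encodings the property can take; the enum names are illustrative stand-ins for the QEMU XIVE_ESB_* constants:

    /* Two-bit ESB PQ states (P is bit 1, Q is bit 0). */
    enum {
        ESB_PQ_RESET   = 0x0,  /* 00: idle, next event will be routed      */
        ESB_PQ_OFF     = 0x1,  /* 01: source masked (the reset-pq default) */
        ESB_PQ_PENDING = 0x2,  /* 10: event routed, EOI not yet received   */
        ESB_PQ_QUEUED  = 0x3,  /* 11: event routed plus one coalesced      */
    };
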
1419 dc->desc = "XIVE Interrupt Source"; in xive_source_class_init()
1421 dc->realize = xive_source_realize; in xive_source_class_init()
1422 dc->vmsd = &vmstate_xive_source; in xive_source_class_init()
1427 dc->user_creatable = false; in xive_source_class_init()
1444 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); in xive_end_queue_pic_print_info()
1445 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); in xive_end_queue_pic_print_info()
1450 * print out the [(qindex - (width - 1)) .. (qindex + 1)] window in xive_end_queue_pic_print_info()
1453 qindex = (qindex - (width - 1)) & (qentries - 1); in xive_end_queue_pic_print_info()
1456 uint32_t qdata = -1; in xive_end_queue_pic_print_info()
1464 g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "", in xive_end_queue_pic_print_info()
1466 qindex = (qindex + 1) & (qentries - 1); in xive_end_queue_pic_print_info()
1474 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); in xive_end_pic_print_info()
1475 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); in xive_end_pic_print_info()
1476 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); in xive_end_pic_print_info()
1479 uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); in xive_end_pic_print_info()
1480 uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); in xive_end_pic_print_info()
1481 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); in xive_end_pic_print_info()
1488 pq = xive_get_field32(END_W1_ESn, end->w1); in xive_end_pic_print_info()
1493 pq & XIVE_ESB_VAL_P ? 'P' : '-', in xive_end_pic_print_info()
1494 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', in xive_end_pic_print_info()
1495 xive_end_is_valid(end) ? 'v' : '-', in xive_end_pic_print_info()
1496 xive_end_is_enqueue(end) ? 'q' : '-', in xive_end_pic_print_info()
1497 xive_end_is_notify(end) ? 'n' : '-', in xive_end_pic_print_info()
1498 xive_end_is_backlog(end) ? 'b' : '-', in xive_end_pic_print_info()
1499 xive_end_is_escalate(end) ? 'e' : '-', in xive_end_pic_print_info()
1500 xive_end_is_uncond_escalation(end) ? 'u' : '-', in xive_end_pic_print_info()
1501 xive_end_is_silent_escalation(end) ? 's' : '-', in xive_end_pic_print_info()
1502 xive_end_is_firmware(end) ? 'f' : '-', in xive_end_pic_print_info()
1516 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); in xive_end_enqueue()
1517 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); in xive_end_enqueue()
1518 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); in xive_end_enqueue()
1531 qindex = (qindex + 1) & (qentries - 1); in xive_end_enqueue()
1534 end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); in xive_end_enqueue()
1536 end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); in xive_end_enqueue()
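
The enqueue fragments above maintain a power-of-two circular queue whose generation bit flips on wrap, so the consumer can distinguish new entries from stale ones. A compact sketch of that pointer update; the function and parameters are illustrative, not the QEMU END accessors:

    #include <stdint.h>

    /* Illustrative END queue pointer update: advance the index modulo the
     * queue size and toggle the generation bit when the index wraps to 0. */
    static void end_queue_advance(uint32_t *qindex, uint32_t *qgen,
                                  uint32_t qentries /* power of two */)
    {
        *qindex = (*qindex + 1) & (qentries - 1);
        if (*qindex == 0) {
            *qgen ^= 1;
        }
    }
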
1541 XiveEAS *eas = (XiveEAS *) &end->w4; in xive_end_eas_pic_print_info()
1548 pq = xive_get_field32(END_W1_ESe, end->w1); in xive_end_eas_pic_print_info()
1552 pq & XIVE_ESB_VAL_P ? 'P' : '-', in xive_end_eas_pic_print_info()
1553 pq & XIVE_ESB_VAL_Q ? 'Q' : '-', in xive_end_eas_pic_print_info()
1556 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), in xive_end_eas_pic_print_info()
1557 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), in xive_end_eas_pic_print_info()
1558 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); in xive_end_eas_pic_print_info()
1570 return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); in xive_router_get_eas()
1579 return xrc->get_pq(xrtr, eas_blk, eas_idx, pq); in xive_router_get_pq()
1588 return xrc->set_pq(xrtr, eas_blk, eas_idx, pq); in xive_router_set_pq()
1596 return xrc->get_end(xrtr, end_blk, end_idx, end); in xive_router_get_end()
1604 return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); in xive_router_write_end()
1612 return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); in xive_router_get_nvt()
1620 return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); in xive_router_write_nvt()
1627 return xrc->get_block_id(xrtr); in xive_router_get_block_id()
1634 assert(xrtr->xfb); in xive_router_realize()
1641 return xrc->end_notify(xrtr, eas); in xive_router_end_notify_handler()
1651 CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; in xive_tctx_hw_cam_line()
1652 uint32_t pir = env->spr_cb[SPR_PIR].default_value; in xive_tctx_hw_cam_line()
1717 default: in xive_get_group_level()
1728 * The thread context register words are in big-endian format.
1736 uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); in xive_presenter_tctx_match()
1737 uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); in xive_presenter_tctx_match()
1738 uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); in xive_presenter_tctx_match()
1739 uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); in xive_presenter_tctx_match()
1754 return -1; in xive_presenter_tctx_match()
1777 /* F=1 : User level Event-Based Branch (EBB) notification */ in xive_presenter_tctx_match()
1787 return -1; in xive_presenter_tctx_match()
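
Since the TIMA words are stored big-endian, a CAM compare has to bring both sides into the same byte order. One way to sketch it with generic helpers (the QEMU code uses its own field accessors):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative big-endian 32-bit read of a TIMA word. */
    static uint32_t tima_read_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* Illustrative CAM compare: 'cam' is a host-order CAM line value and
     * 'ring_w2' points at the big-endian word 2 of a TIMA ring. */
    static bool tima_cam_match(const uint8_t *ring_w2, uint32_t cam)
    {
        return tima_read_be32(ring_w2) == cam;
    }
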
1807 * For VP-specific notification, we expect at most one match and in xive_presenter_notify()
1811 * For VP-group notification, match_nvt() is the equivalent of the in xive_presenter_notify()
1827 count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, in xive_presenter_notify()
1854 uint8_t pq = xive_get_field32(end_esmask, end->w1); in xive_router_end_es_notify()
1857 if (pq != xive_get_field32(end_esmask, end->w1)) { in xive_router_end_es_notify()
1858 end->w1 = xive_set_field32(end_esmask, end->w1, pq); in xive_router_end_es_notify()
1867 * An END trigger can come from an event trigger (IPI or HW) or from
1868 * another chip. We don't model the PowerBus but the END trigger
1881 uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); in xive_router_end_notify()
1882 uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); in xive_router_end_notify()
1883 uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w); in xive_router_end_notify()
1917 * F=1 : User level Event-Based Branch (EBB) notification, no in xive_router_end_notify()
1959 found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, in xive_router_end_notify()
1965 /* we don't support VP-group notification on P9, so precluded is not used */ in xive_router_end_notify()
1974 * - specific VP: update the NVT structure if backlog is activated in xive_router_end_notify()
1975 * - logical server : forward request to IVPE (not supported) in xive_router_end_notify()
2004 * the EAS in w4-5 in xive_router_end_notify()
2027 * The END trigger becomes an Escalation trigger in xive_router_end_notify()
2078 * The event trigger becomes an END trigger in xive_router_notify()
2084 DEFINE_PROP_LINK("xive-fabric", XiveRouter, xfb,
2094 dc->desc = "XIVE Router Engine"; in xive_router_class_init()
2097 dc->realize = xive_router_realize; in xive_router_class_init()
2098 xnc->notify = xive_router_notify; in xive_router_class_init()
2100 /* By default, the router handles END triggers locally */ in xive_router_class_init()
2101 xrc->end_notify = xive_router_end_notify; in xive_router_class_init()
2126 (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), in xive_eas_pic_print_info()
2127 (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), in xive_eas_pic_print_info()
2128 (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); in xive_eas_pic_print_info()
2143 uint64_t ret = -1; in xive_end_source_read()
2149 end_blk = xive_router_get_block_id(xsrc->xrtr); in xive_end_source_read()
2150 end_idx = addr >> (xsrc->esb_shift + 1); in xive_end_source_read()
2154 if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { in xive_end_source_read()
2157 return -1; in xive_end_source_read()
2163 return -1; in xive_end_source_read()
2166 end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; in xive_end_source_read()
2186 default: in xive_end_source_read()
2189 return -1; in xive_end_source_read()
2194 xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); in xive_end_source_read()
2228 assert(xsrc->xrtr); in xive_end_source_realize()
2230 if (!xsrc->nr_ends) { in xive_end_source_realize()
2235 if (xsrc->esb_shift != XIVE_ESB_4K && in xive_end_source_realize()
2236 xsrc->esb_shift != XIVE_ESB_64K) { in xive_end_source_realize()
2245 memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), in xive_end_source_realize()
2247 (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); in xive_end_source_realize()
2251 DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
2261 dc->desc = "XIVE END Source"; in xive_end_source_class_init()
2263 dc->realize = xive_end_source_realize; in xive_end_source_class_init()
2268 dc->user_creatable = false; in xive_end_source_class_init()