1 /* 2 * QEMU PowerPC sPAPR IRQ interface 3 * 4 * Copyright (c) 2018, IBM Corporation. 5 * 6 * This code is licensed under the GPL version 2 or later. See the 7 * COPYING file in the top-level directory. 8 */ 9 10 #include "qemu/osdep.h" 11 #include "qemu/log.h" 12 #include "qemu/error-report.h" 13 #include "qapi/error.h" 14 #include "hw/irq.h" 15 #include "hw/ppc/spapr.h" 16 #include "hw/ppc/spapr_cpu_core.h" 17 #include "hw/ppc/spapr_xive.h" 18 #include "hw/ppc/xics.h" 19 #include "hw/ppc/xics_spapr.h" 20 #include "hw/qdev-properties.h" 21 #include "cpu-models.h" 22 #include "sysemu/kvm.h" 23 24 #include "trace.h" 25 26 static const TypeInfo spapr_intc_info = { 27 .name = TYPE_SPAPR_INTC, 28 .parent = TYPE_INTERFACE, 29 .class_size = sizeof(SpaprInterruptControllerClass), 30 }; 31 32 void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis) 33 { 34 spapr->irq_map_nr = nr_msis; 35 spapr->irq_map = bitmap_new(spapr->irq_map_nr); 36 } 37 38 int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align, 39 Error **errp) 40 { 41 int irq; 42 43 /* 44 * The 'align_mask' parameter of bitmap_find_next_zero_area() 45 * should be one less than a power of 2; 0 means no 46 * alignment. 
Adapt the 'align' value of the former allocator 47 * to fit the requirements of bitmap_find_next_zero_area() 48 */ 49 align -= 1; 50 51 irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num, 52 align); 53 if (irq == spapr->irq_map_nr) { 54 error_setg(errp, "can't find a free %d-IRQ block", num); 55 return -1; 56 } 57 58 bitmap_set(spapr->irq_map, irq, num); 59 60 return irq + SPAPR_IRQ_MSI; 61 } 62 63 void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num) 64 { 65 bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num); 66 } 67 68 static void spapr_irq_init_kvm(SpaprMachineState *spapr, 69 SpaprIrq *irq, Error **errp) 70 { 71 MachineState *machine = MACHINE(spapr); 72 Error *local_err = NULL; 73 74 if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) { 75 irq->init_kvm(spapr, &local_err); 76 if (local_err && machine_kernel_irqchip_required(machine)) { 77 error_prepend(&local_err, 78 "kernel_irqchip requested but unavailable: "); 79 error_propagate(errp, local_err); 80 return; 81 } 82 83 if (!local_err) { 84 return; 85 } 86 87 /* 88 * We failed to initialize the KVM device, fallback to 89 * emulated mode 90 */ 91 error_prepend(&local_err, "kernel_irqchip allowed but unavailable: "); 92 error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n"); 93 warn_report_err(local_err); 94 } 95 } 96 97 /* 98 * XICS IRQ backend. 
/*
 * Dump the state of each per-CPU interrupt presenter (ICP) and of the
 * interrupt source controller (ICS) to the monitor.
 */
static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

/*
 * After migration, when the XICS device is emulated (not in-kernel),
 * resend any interrupt that was pending in the incoming ICP state.
 */
static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

/*
 * qemu_irq handler: translate the global IRQ number into a source
 * number local to the ICS before forwarding the line state.
 */
static void spapr_irq_set_irq_xics(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;
    uint32_t srcno = irq - spapr->ics->offset;

    ics_set_irq(spapr->ics, srcno, val);
}

/* Machine reset: (re)connect the KVM XICS device when KVM is in use */
static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}

/* XICS-only interrupt mode (ic-mode=xics) */
SpaprIrq spapr_irq_xics = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .xics        = true,
    .xive        = false,

    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};

/*
 * XIVE IRQ backend.
 */
/*
 * Dump the state of each per-CPU thread interrupt context (TCTX) and
 * of the XIVE device to the monitor.
 */
static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}

/* Post-migration fixups are delegated to the XIVE model */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}

/*
 * Machine reset: program the OS CAM line of every thread context,
 * (re)connect the KVM XIVE device if applicable, then enable the XIVE
 * MMIO regions. The ordering matters: MMIOs are only activated once
 * the contexts and the KVM device are in a consistent state.
 */
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

/*
 * qemu_irq handler: forward the line state to the in-kernel XIVE
 * source when KVM is in use, to the emulated source otherwise.
 */
static void spapr_irq_set_irq_xive(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, irq, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, irq, val);
    }
}

static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}

/* XIVE-only interrupt mode (ic-mode=xive) */
SpaprIrq spapr_irq_xive = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .xics        = false,
    .xive        = true,

    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
    .init_kvm    = spapr_irq_init_kvm_xive,
};
/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode, XIVE and XICS, objects are created but the
 * machine starts in legacy interrupt mode (XICS). It can be changed
 * by the CAS negotiation process and, in that case, the new mode is
 * activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

/* Dispatch to the backend negotiated by CAS */
static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

/* Dispatch to the backend negotiated by CAS */
static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

/*
 * Machine reset in dual mode: tear down both KVM devices (the guest
 * may have renegotiated the interrupt mode via CAS) and let the
 * currently negotiated backend rebuild its state.
 */
static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

/* Dispatch to the backend negotiated by CAS */
static void spapr_irq_set_irq_dual(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, irq, val);
}

/*
 * Define values in sync with the XIVE and XICS backend
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .xics        = true,
    .xive        = true,

    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
    .init_kvm    = NULL, /* should not be used */
};


/*
 * Validate the interrupt mode against the CPU model and the KVM host
 * capabilities; may silently downgrade 'dual' to XICS on pre-POWER9
 * CPUs. Returns 0 on success, -1 (with @errp set) on error.
 */
static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return 0;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return -1;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return -1;
    }

    return 0;
}

/*
 * sPAPR IRQ frontend routines for devices
 */

/* Array of the interrupt controllers the machine may have (NULLs allowed) */
#define ALL_INTCS(spapr_) \
    { SPAPR_INTC((spapr_)->ics), SPAPR_INTC((spapr_)->xive), }

/*
 * Create the per-CPU presenter object(s) (ICP and/or TCTX) on every
 * existing interrupt controller. Returns 0 on success, a negative
 * value (with @errp set) on failure.
 */
int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
                              PowerPCCPU *cpu, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->cpu_intc_create(intc, cpu, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}
/*
 * Create and realize the interrupt controller backend(s) selected by
 * the machine 'ic-mode' option: the ICS for XICS, the XIVE device for
 * XIVE, or both in 'dual' mode. Also allocates the qemu_irq array
 * used by devices to raise interrupts.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    if (spapr_irq_check(spapr, errp) < 0) {
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    if (spapr->irq->xics) {
        Error *local_err = NULL;
        Object *obj;

        obj = object_new(TYPE_ICS_SPAPR);
        object_property_add_child(OBJECT(spapr), "ics", obj, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* The ICS needs a back pointer to the machine (XICS fabric) */
        object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                       &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_int(obj, spapr->irq->nr_xirqs, "nr-irqs",
                                &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_bool(obj, true, "realized", &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        spapr->ics = ICS_SPAPR(obj);
    }

    if (spapr->irq->xive) {
        uint32_t nr_servers = spapr_max_server_number(spapr);
        DeviceState *dev;
        int i;

        dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
        qdev_prop_set_uint32(dev, "nr-irqs",
                             spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
        /*
         * 8 XIVE END structures per CPU. One for each available
         * priority
         */
        qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
        qdev_init_nofail(dev);

        spapr->xive = SPAPR_XIVE(dev);

        /* Enable the CPU IPIs */
        for (i = 0; i < nr_servers; ++i) {
            SpaprInterruptControllerClass *sicc
                = SPAPR_INTC_GET_CLASS(spapr->xive);

            if (sicc->claim_irq(SPAPR_INTC(spapr->xive), SPAPR_IRQ_IPI + i,
                                false, errp) < 0) {
                return;
            }
        }

        spapr_xive_hcall_init(spapr);
    }

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}

/*
 * Claim an external IRQ number on every existing interrupt
 * controller so both backends stay in sync in 'dual' mode.
 * Returns 0 on success, a negative value (with @errp set) on failure.
 */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    /* Only external IRQ numbers may be claimed through this interface */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->claim_irq(intc, irq, lsi, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

/*
 * Release a block of 'num' external IRQ numbers starting at 'irq' on
 * every existing interrupt controller.
 */
void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i, j;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert((irq + num) <= (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = irq; i < (irq + num); i++) {
        for (j = 0; j < ARRAY_SIZE(intcs); j++) {
            SpaprInterruptController *intc = intcs[j];

            if (intc) {
                SpaprInterruptControllerClass *sicc
                    = SPAPR_INTC_GET_CLASS(intc);
                sicc->free_irq(intc, i);
            }
        }
    }
}
/*
 * Return the qemu_irq for an external IRQ number, sanity-checking it
 * against every existing interrupt controller.
 */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest side
     * interfaces, we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}

/* Post-migration hook: refresh the active controller, then delegate */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    spapr_irq_update_active_intc(spapr);
    return spapr->irq->post_load(spapr, version_id);
}

/* Machine reset hook: refresh the active controller, then delegate */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    /* No MSI may be in flight across a machine reset */
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    spapr_irq_update_active_intc(spapr);

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

/*
 * Look up the phandle of the "interrupt-controller" node in @fdt.
 * Returns the phandle, or -1 (with @errp set) when the node or its
 * phandle is missing.
 */
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

/*
 * Switch the active interrupt controller: deactivate the previous one
 * (if any) before activating the new one. A no-op when the controller
 * is unchanged.
 */
static void set_active_intc(SpaprMachineState *spapr,
                            SpaprInterruptController *new_intc)
{
    SpaprInterruptControllerClass *sicc;

    assert(new_intc);

    if (new_intc == spapr->active_intc) {
        /* Nothing to do */
        return;
    }

    if (spapr->active_intc) {
        sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc);
        if (sicc->deactivate) {
            sicc->deactivate(spapr->active_intc);
        }
    }

    sicc = SPAPR_INTC_GET_CLASS(new_intc);
    if (sicc->activate) {
        sicc->activate(new_intc, &error_fatal);
    }

    spapr->active_intc = new_intc;
}
(sicc->activate) { 646 sicc->activate(new_intc, &error_fatal); 647 } 648 649 spapr->active_intc = new_intc; 650 } 651 652 void spapr_irq_update_active_intc(SpaprMachineState *spapr) 653 { 654 SpaprInterruptController *new_intc; 655 656 if (!spapr->ics) { 657 /* 658 * XXX before we run CAS, ov5_cas is initialized empty, which 659 * indicates XICS, even if we have ic-mode=xive. TODO: clean 660 * up the CAS path so that we have a clearer way of handling 661 * this. 662 */ 663 new_intc = SPAPR_INTC(spapr->xive); 664 } else if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { 665 new_intc = SPAPR_INTC(spapr->xive); 666 } else { 667 new_intc = SPAPR_INTC(spapr->ics); 668 } 669 670 set_active_intc(spapr, new_intc); 671 } 672 673 /* 674 * XICS legacy routines - to deprecate one day 675 */ 676 677 static int ics_find_free_block(ICSState *ics, int num, int alignnum) 678 { 679 int first, i; 680 681 for (first = 0; first < ics->nr_irqs; first += alignnum) { 682 if (num > (ics->nr_irqs - first)) { 683 return -1; 684 } 685 for (i = first; i < first + num; ++i) { 686 if (!ics_irq_free(ics, i)) { 687 break; 688 } 689 } 690 if (i == (first + num)) { 691 return first; 692 } 693 } 694 695 return -1; 696 } 697 698 int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp) 699 { 700 ICSState *ics = spapr->ics; 701 int first = -1; 702 703 assert(ics); 704 705 /* 706 * MSIMesage::data is used for storing VIRQ so 707 * it has to be aligned to num to support multiple 708 * MSI vectors. MSI-X is not affected by this. 709 * The hint is used for the first IRQ, the rest should 710 * be allocated continuously. 
/*
 * Legacy IRQ number allocator: find a block of 'num' free sources in
 * the ICS, optionally aligned on 'num'. Returns the first global IRQ
 * number, or -1 (with @errp set) when no block is available.
 */
int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        /* MSI only supports power-of-2 vector counts up to 32 */
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    /* Translate the source number into a global IRQ number */
    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS     0x400

/* XICS backend for older pseries machine types (legacy IRQ allocation) */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs    = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .xics        = true,
    .xive        = false,

    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};

static void spapr_irq_register_types(void)
{
    type_register_static(&spapr_intc_info);
}

type_init(spapr_irq_register_types)