/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "hw/qdev-properties.h"
#include "cpu-models.h"
#include "sysemu/kvm.h"

#include "trace.h"

static const TypeInfo spapr_intc_info = {
    .name = TYPE_SPAPR_INTC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(SpaprInterruptControllerClass),
};

void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

static int spapr_irq_init_kvm(int (*fn)(SpaprInterruptController *, Error **),
                              SpaprInterruptController *intc,
                              Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        if (fn(intc, &local_err) < 0) {
            if (machine_kernel_irqchip_required(machine)) {
                error_prepend(&local_err,
                              "kernel_irqchip requested but unavailable: ");
                error_propagate(errp, local_err);
                return -1;
            }

            /*
             * We failed to initialize the KVM device, fall back to
             * emulated mode
             */
            error_prepend(&local_err,
                          "kernel_irqchip allowed but unavailable: ");
            error_append_hint(&local_err,
                              "Falling back to kernel-irqchip=off\n");
            warn_report_err(local_err);
        }
    }

    return 0;
}

/*
 * XICS IRQ backend.
 */

static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    spapr_irq_init_kvm(xics_kvm_connect, SPAPR_INTC(spapr->ics), errp);
}

SpaprIrq spapr_irq_xics = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = true,
    .xive = false,

    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
};

/*
 * XIVE IRQ backend.
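 *
 * XIVE is the interrupt controller architecture introduced with
 * POWER9. Guests can instead run in the legacy XICS mode modelled
 * by the backend above.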
 */

static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}

static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    if (spapr_irq_init_kvm(kvmppc_xive_connect,
                           SPAPR_INTC(spapr->xive), errp) < 0) {
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

SpaprIrq spapr_irq_xive = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = false,
    .xive = true,

    .post_load = spapr_irq_post_load_xive,
    .reset = spapr_irq_reset_xive,
};

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(SPAPR_INTC(spapr->ics));
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(SPAPR_INTC(spapr->ics));
        kvmppc_xive_disconnect(SPAPR_INTC(spapr->xive));
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

/*
 * Define values in sync with the XIVE and XICS backends
 */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs = SPAPR_NR_XIRQS,
    .nr_msis = SPAPR_NR_MSIS,
    .xics = true,
    .xive = true,

    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
};

static int spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised; see spapr_dt_ov5_platform_support().
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return 0;
        }

        /*
         * A non-P9 machine using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup: DT
         *    properties but no hcalls.
         *
         * To cover both cases without confusing the OS, add an early
         * failure in QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return -1;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU exiting later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return -1;
    }

    return 0;
}

/*
 * sPAPR IRQ frontend routines for devices
 */
#define ALL_INTCS(spapr_) \
    { SPAPR_INTC((spapr_)->ics), SPAPR_INTC((spapr_)->xive), }

int spapr_irq_cpu_intc_create(SpaprMachineState *spapr,
                              PowerPCCPU *cpu, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->cpu_intc_create(intc, cpu, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

static void spapr_set_irq(void *opaque, int irq, int level)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(opaque);
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->set_irq(spapr->active_intc, irq, level);
}

void spapr_irq_print_info(SpaprMachineState *spapr, Monitor *mon)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->print_info(spapr->active_intc, mon);
}

void spapr_irq_dt(SpaprMachineState *spapr, uint32_t nr_servers,
                  void *fdt, uint32_t phandle)
{
    SpaprInterruptControllerClass *sicc
        = SPAPR_INTC_GET_CLASS(spapr->active_intc);

    sicc->dt(spapr->active_intc, nr_servers, fdt, phandle);
}

void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    if (spapr_irq_check(spapr, errp) < 0) {
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    if (spapr->irq->xics) {
        Error *local_err = NULL;
        Object *obj;

        obj = object_new(TYPE_ICS_SPAPR);
        object_property_add_child(OBJECT(spapr), "ics", obj, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                       &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_int(obj, spapr->irq->nr_xirqs, "nr-irqs",
                                &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        object_property_set_bool(obj, true, "realized", &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        spapr->ics = ICS_SPAPR(obj);
    }

    if (spapr->irq->xive) {
        uint32_t nr_servers = spapr_max_server_number(spapr);
        DeviceState *dev;
        int i;

        dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
        qdev_prop_set_uint32(dev, "nr-irqs",
                             spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
        /*
         * 8 XIVE END structures per CPU, one for each available
         * priority
         */
        qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
        qdev_init_nofail(dev);

        spapr->xive = SPAPR_XIVE(dev);

        /* Enable the CPU IPIs */
        for (i = 0; i < nr_servers; ++i) {
            SpaprInterruptControllerClass *sicc
                = SPAPR_INTC_GET_CLASS(spapr->xive);

            if (sicc->claim_irq(SPAPR_INTC(spapr->xive), SPAPR_IRQ_IPI + i,
                                false, errp) < 0) {
                return;
            }
        }

        spapr_xive_hcall_init(spapr);
    }

    spapr->qirqs = qemu_allocate_irqs(spapr_set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}

int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i;
    int rc;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = 0; i < ARRAY_SIZE(intcs); i++) {
        SpaprInterruptController *intc = intcs[i];
        if (intc) {
            SpaprInterruptControllerClass *sicc = SPAPR_INTC_GET_CLASS(intc);
            rc = sicc->claim_irq(intc, irq, lsi, errp);
            if (rc < 0) {
                return rc;
            }
        }
    }

    return 0;
}

void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    SpaprInterruptController *intcs[] = ALL_INTCS(spapr);
    int i, j;

    assert(irq >= SPAPR_XIRQ_BASE);
    assert((irq + num) <= (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    for (i = irq; i < (irq + num); i++) {
        for (j = 0; j < ARRAY_SIZE(intcs); j++) {
            SpaprInterruptController *intc = intcs[j];

            if (intc) {
                SpaprInterruptControllerClass *sicc
                    = SPAPR_INTC_GET_CLASS(intc);
                sicc->free_irq(intc, i);
            }
        }
    }
}

qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest-side
     * interfaces; we can change this if we need to in future.
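     *
     * A typical device-side sequence is to claim the source with
     * spapr_irq_claim() and then raise or lower it with
     * qemu_set_irq(spapr_qirq(spapr, irq), level).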
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}

int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    spapr_irq_update_active_intc(spapr);
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    spapr_irq_update_active_intc(spapr);

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

static void set_active_intc(SpaprMachineState *spapr,
                            SpaprInterruptController *new_intc)
{
    SpaprInterruptControllerClass *sicc;

    assert(new_intc);

    if (new_intc == spapr->active_intc) {
        /* Nothing to do */
        return;
    }

    if (spapr->active_intc) {
        sicc = SPAPR_INTC_GET_CLASS(spapr->active_intc);
        if (sicc->deactivate) {
            sicc->deactivate(spapr->active_intc);
        }
    }

    sicc = SPAPR_INTC_GET_CLASS(new_intc);
    if (sicc->activate) {
        sicc->activate(new_intc, &error_fatal);
    }

    spapr->active_intc = new_intc;
}

void spapr_irq_update_active_intc(SpaprMachineState *spapr)
{
    SpaprInterruptController *new_intc;

    if (!spapr->ics) {
        /*
         * XXX before we run CAS, ov5_cas is initialized empty, which
         * indicates XICS, even if we have ic-mode=xive. TODO: clean
         * up the CAS path so that we have a clearer way of handling
         * this.
         */
        new_intc = SPAPR_INTC(spapr->xive);
    } else if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        new_intc = SPAPR_INTC(spapr->xive);
    } else {
        new_intc = SPAPR_INTC(spapr->ics);
    }

    set_active_intc(spapr, new_intc);
}

/*
 * XICS legacy routines - to deprecate one day
 */

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ics_irq_free(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
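     *
     * PCI MSI only supports 1, 2, 4, 8, 16 or 32 vectors per
     * function, hence the power-of-two assertion in the aligned
     * case below.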
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS 0x400

SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .xics = true,
    .xive = false,

    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
};

static void spapr_irq_register_types(void)
{
    type_register_static(&spapr_intc_info);
}

type_init(spapr_irq_register_types)