/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"

static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}

void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &icp->ss[cs->cpu_index];

    assert(cs->cpu_index < icp->nr_servers);
    assert(cs == ss->cs);

    ss->output = NULL;
    ss->cs = NULL;
}

void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    ss->cs = cs;

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}

/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */
static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, name, &value, errp);
}
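
/*
 * "nr_irqs" and "nr_servers" are write-once properties: the setters below
 * reject a second write and delegate the actual sizing to the subclass
 * hooks (set_nr_irqs/set_nr_servers).  A machine would typically size the
 * controller at creation time with something like this (illustrative
 * sketch, not taken from this file):
 *
 *     object_property_set_int(OBJECT(icp), nr_irqs, "nr_irqs", &error);
 *     object_property_set_int(OBJECT(icp), nr_servers, "nr_servers", &error);
 */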

static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, name, &value, errp);
}

static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     const char *name, void *opaque,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name = TYPE_XICS_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}
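
/*
 * Update a server's CPPR (Current Processor Priority Register).  Lowering
 * the value makes the processor more selective: if the interrupt currently
 * latched in XISR no longer qualifies (its priority is not more favoured
 * than the new CPPR), it is handed back to the source via ics_reject().
 * Raising the value widens the acceptance window, so a resend is triggered
 * to give previously rejected or masked-pending interrupts another chance.
 */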

static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}

static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */
static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}

static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}
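
/*
 * Walk every source and retry delivery.  This runs when a server becomes
 * able to take interrupts again (CPPR raised, or EOI with an empty XISR)
 * and after migration via ics_post_load(), so that rejected MSIs and
 * still-asserted LSIs are not lost.
 */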

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;
    uint8_t flags[ics->nr_irqs];

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
        ics->irqs[i].flags = flags[i];
    }
}

static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */

static int xics_find_source(XICSState *icp, int irq)
{
    int sources = 1;
    int src;

    /* FIXME: implement multiple sources */
    for (src = 0; src < sources; ++src) {
        ICSState *ics = &icp->ics[src];
        if (ics_valid_irq(ics, irq)) {
            return src;
        }
    }

    return -1;
}

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}

static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}

#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
{
    ICSState *ics = &icp->ics[src];
    int irq;

    if (irq_hint) {
        assert(src == xics_find_source(icp, irq_hint));
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    trace_xics_alloc(src, irq);

    return irq;
}

/*
 * Allocate a block of consecutive IRQs, and return the number of the
 * first IRQ in the block.  If align==true, aligns the first IRQ number
 * to num.
 */
int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
                     Error **errp)
{
    int i, first = -1;
    ICSState *ics = &icp->ics[src];

    assert(src == 0);
    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    if (first >= 0) {
        for (i = first; i < first + num; ++i) {
            ics_set_irq_type(ics, i, lsi);
        }
    }
    first += ics->offset;

    trace_xics_alloc_block(src, first, num, lsi, align);

    return first;
}

static void ics_free(ICSState *ics, int srcno, int num)
{
    int i;

    for (i = srcno; i < srcno + num; ++i) {
        if (ICS_IRQ_FREE(ics, i)) {
            trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
        }
        memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
    }
}

void xics_free(XICSState *icp, int irq, int num)
{
    int src = xics_find_source(icp, irq);

    if (src >= 0) {
        ICSState *ics = &icp->ics[src];

        /* FIXME: implement multiple sources */
        assert(src == 0);

        trace_xics_ics_free(ics - icp->ics, irq, num);
        ics_free(ics, irq - ics->offset, num);
    }
}

/*
 * Guest interfaces
 */

static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = get_cpu_index_by_dt_id(args[0]);
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];

    args[0] = ss->xirr;
    args[1] = ss->mfrr;

    return H_SUCCESS;
}

static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/*
 * XICS
 */

static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
{
    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
}

static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
                                Error **errp)
{
    int i;

    icp->nr_servers = nr_servers;

    icp->ss = g_malloc0(icp->nr_servers * sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];
        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  errp);
    }
}

static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater than 0");
        return;
    }

    /* Registration of global state belongs in realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}

static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
    xics->ics->icp = xics;
}

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    XICSStateClass *xsc = XICS_CLASS(oc);

    dc->realize = xics_realize;
    xsc->set_nr_irqs = xics_set_nr_irqs;
    xsc->set_nr_servers = xics_set_nr_servers;
}

static const TypeInfo xics_info = {
    .name = TYPE_XICS,
    .parent = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .class_init = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)