/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "system/reset.h"
#include "system/runstate.h"

#include "trace.h"

S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
            ? (entry & ZPCI_RTE_ADDR_MASK)
            : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
            ? (entry & ZPCI_STE_ADDR_MASK)
            : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_ST:
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case ZPCI_ETT_RT:
        return false;
    }

    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 *                  table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
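 *
 * Returns: the origin of the next table to walk, or 0 if the walk should
 *          stop (translation complete, entry invalid, or error).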
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed; that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static const PCIIOMMUOps s390_iommu_ops = {
    .get_address_space = s390_pci_dma_iommu,
};

static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id, int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    group->host_id = host_id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

S390PCIGroup *s390_group_find_host_sim(int host_id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, &s390_iommu_ops, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                               pci_config_size(pbdev->pdev), sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                               pci_config_size(pbdev->pdev), sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                               pci_config_size(pbdev->pdev), sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
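    /*
     * Find a free function handle index, starting the search at next_idx and
     * wrapping around the index space; fail if every index is already in use.
     */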
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        /*
         * Multifunction is not supported due to the lack of CLP. However,
         * do not check for multifunction capability on SR-IOV devices, because
         * SR-IOV devices automatically add the multifunction capability
         * whether or not the user intends to use functions other than the PF.
         */
        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION &&
            !pdev->exp.sriov_cap) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }

        if (!dev->id) {
            /*
             * If the PCI device does not define an id, generate one based
             * on the PCI address.
             */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            /*
             * VFs are automatically created by PF, and creating zpci for them
             * will result in unexpected usage of fids. Currently QEMU does not
             * support multifunction for s390x so we don't need zpci for VFs
             * anyway.
             */
            if (pci_is_vf(pdev)) {
                return;
            }

            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fall back to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            g_assert(pci_is_vf(pci_dev));
            return;
        }

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            g_assert(pci_is_vf(PCI_DEVICE(dev)));
            return;
        }

        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in turn
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow sending multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

void s390_pci_ism_reset(void)
{
    S390pciState *s = s390_get_phb();
    S390PCIBusDevice *pbdev, *next;

    /* Trigger reset event for each passthrough ISM device currently in-use */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->interp && pbdev->pft == ZPCI_PFT_ISM &&
            pbdev->fh & FH_MASK_ENABLE) {
            s390_pci_kvm_aif_disable(pbdev);

            pci_device_reset(pbdev->pdev);
        }
    }
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_legacy_reset(dc, s390_pcihost_reset);
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name = TYPE_S390_PCI_HOST_BRIDGE,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
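        /* Tear down the guest's I/O address translation tables */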
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    const Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    const Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .type = "uint32",
    .description = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

static const Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_legacy_reset(dc, s390_pci_device_reset);
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)