/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry) != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
                lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

/*
 * CLP (Call Logical Processor) service call: list, query, enable and
 * disable PCI functions on behalf of the guest.
 */
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, sizeof(*reqh));
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, req_len + sizeof(*resh));
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    cpu_physical_memory_read(env->regs[r2], buffer, req_len + res_len);

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                                         PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    cpu_physical_memory_write(env->regs[r2], buffer, req_len + res_len);
    setcc(cpu, cc);
    return 0;
}

/* PCILG (PCI Load): read from a PCI BAR or config space into a register */
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        io_mem_read(mr, offset, &data, len);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

/*
 * Fold the function id into MSI-X message data written by the guest so the
 * resulting MSI can be mapped back to this device.
 */
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
0x%" PRIx64 "\n", *data); 380 } 381 382 static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias) 383 { 384 if (pbdev->msix.available && pbdev->msix.table_bar == pcias && 385 offset >= pbdev->msix.table_offset && 386 offset <= pbdev->msix.table_offset + 387 (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) { 388 return 1; 389 } else { 390 return 0; 391 } 392 } 393 394 int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) 395 { 396 CPUS390XState *env = &cpu->env; 397 uint64_t offset, data; 398 S390PCIBusDevice *pbdev; 399 uint8_t len; 400 uint32_t fh; 401 uint8_t pcias; 402 403 cpu_synchronize_state(CPU(cpu)); 404 405 if (env->psw.mask & PSW_MASK_PSTATE) { 406 program_interrupt(env, PGM_PRIVILEGED, 4); 407 return 0; 408 } 409 410 if (r2 & 0x1) { 411 program_interrupt(env, PGM_SPECIFICATION, 4); 412 return 0; 413 } 414 415 fh = env->regs[r2] >> 32; 416 pcias = (env->regs[r2] >> 16) & 0xf; 417 len = env->regs[r2] & 0xf; 418 offset = env->regs[r2 + 1]; 419 420 pbdev = s390_pci_find_dev_by_fh(fh); 421 if (!pbdev) { 422 DPRINTF("pcistg no pci dev\n"); 423 setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); 424 return 0; 425 } 426 427 if (pbdev->lgstg_blocked) { 428 setcc(cpu, ZPCI_PCI_LS_ERR); 429 s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); 430 return 0; 431 } 432 433 data = env->regs[r1]; 434 if (pcias < 6) { 435 if ((8 - (offset & 0x7)) < len) { 436 program_interrupt(env, PGM_OPERAND, 4); 437 return 0; 438 } 439 MemoryRegion *mr; 440 if (trap_msix(pbdev, offset, pcias)) { 441 offset = offset - pbdev->msix.table_offset; 442 mr = &pbdev->pdev->msix_table_mmio; 443 update_msix_table_msg_data(pbdev, offset, &data, len); 444 } else { 445 mr = pbdev->pdev->io_regions[pcias].memory; 446 } 447 448 io_mem_write(mr, offset, data, len); 449 } else if (pcias == 15) { 450 if ((4 - (offset & 0x3)) < len) { 451 program_interrupt(env, PGM_OPERAND, 4); 452 return 0; 453 } 454 switch (len) { 455 case 1: 456 break; 457 case 2: 458 data = bswap16(data); 459 break; 460 case 4: 461 data = bswap32(data); 462 break; 463 case 8: 464 data = bswap64(data); 465 break; 466 default: 467 program_interrupt(env, PGM_OPERAND, 4); 468 return 0; 469 } 470 471 pci_host_config_write_common(pbdev->pdev, offset, 472 pci_config_size(pbdev->pdev), 473 data, len); 474 } else { 475 DPRINTF("pcistg invalid space\n"); 476 setcc(cpu, ZPCI_PCI_LS_ERR); 477 s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS); 478 return 0; 479 } 480 481 setcc(cpu, ZPCI_PCI_LS_OK); 482 return 0; 483 } 484 485 int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) 486 { 487 CPUS390XState *env = &cpu->env; 488 uint32_t fh; 489 S390PCIBusDevice *pbdev; 490 hwaddr start, end; 491 IOMMUTLBEntry entry; 492 MemoryRegion *mr; 493 494 cpu_synchronize_state(CPU(cpu)); 495 496 if (env->psw.mask & PSW_MASK_PSTATE) { 497 program_interrupt(env, PGM_PRIVILEGED, 4); 498 goto out; 499 } 500 501 if (r2 & 0x1) { 502 program_interrupt(env, PGM_SPECIFICATION, 4); 503 goto out; 504 } 505 506 fh = env->regs[r1] >> 32; 507 start = env->regs[r2]; 508 end = start + env->regs[r2 + 1]; 509 510 pbdev = s390_pci_find_dev_by_fh(fh); 511 512 if (!pbdev) { 513 DPRINTF("rpcit no pci dev\n"); 514 setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); 515 goto out; 516 } 517 518 mr = pci_device_iommu_address_space(pbdev->pdev)->root; 519 while (start < end) { 520 entry = mr->iommu_ops->translate(mr, start, 0); 521 522 if (!entry.translated_addr) { 523 setcc(cpu, ZPCI_PCI_LS_ERR); 524 goto out; 525 } 526 527 memory_region_notify_iommu(mr, entry); 528 start += entry.addr_mask + 1; 529 } 

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

/* PCISTB (PCI Store Block): copy a block of guest memory to PCI MMIO space */
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint64_t val;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        val = ldq_phys(&address_space_memory, gaddr + i * 8);
        io_mem_write(mr, env->regs[r3] + i * 8, val, 8);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

/* Register the adapter interrupt sources described by the FIB */
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

/* Register the guest's I/O address translation (IOAT) parameters */
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;
    return 0;
}

static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

/* MPCIFC (Modify PCI Function Controls): apply the operation requested in
 * the FIB (interrupt and IOAT registration, error reset, measurement block).
 */
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    cpu_physical_memory_read(fiba, (uint8_t *)&fib, sizeof(fib));

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

/* STPCIFC (Store PCI Function Controls): store the function's current FIB
 * back to guest memory.
 */
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh >> ENABLE_BIT_OFFSET) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    cpu_physical_memory_write(fiba, (uint8_t *)&fib, sizeof(fib));
    setcc(cpu, cc);
    return 0;
}