1 /* 2 * Channel subsystem base support. 3 * 4 * Copyright 2012 IBM Corp. 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * 7 * This work is licensed under the terms of the GNU GPL, version 2 or (at 8 * your option) any later version. See the COPYING file in the top-level 9 * directory. 10 */ 11 12 #include "qemu/osdep.h" 13 #include "qapi/error.h" 14 #include "qapi/visitor.h" 15 #include "qemu/bitops.h" 16 #include "qemu/error-report.h" 17 #include "system/address-spaces.h" 18 #include "hw/s390x/ioinst.h" 19 #include "hw/qdev-properties.h" 20 #include "hw/s390x/css.h" 21 #include "trace.h" 22 #include "hw/s390x/s390_flic.h" 23 #include "hw/s390x/s390-virtio-ccw.h" 24 #include "hw/s390x/s390-ccw.h" 25 26 typedef struct CrwContainer { 27 CRW crw; 28 QTAILQ_ENTRY(CrwContainer) sibling; 29 } CrwContainer; 30 31 static const VMStateDescription vmstate_crw = { 32 .name = "s390_crw", 33 .version_id = 1, 34 .minimum_version_id = 1, 35 .fields = (const VMStateField[]) { 36 VMSTATE_UINT16(flags, CRW), 37 VMSTATE_UINT16(rsid, CRW), 38 VMSTATE_END_OF_LIST() 39 }, 40 }; 41 42 static const VMStateDescription vmstate_crw_container = { 43 .name = "s390_crw_container", 44 .version_id = 1, 45 .minimum_version_id = 1, 46 .fields = (const VMStateField[]) { 47 VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW), 48 VMSTATE_END_OF_LIST() 49 }, 50 }; 51 52 typedef struct ChpInfo { 53 uint8_t in_use; 54 uint8_t type; 55 uint8_t is_virtual; 56 } ChpInfo; 57 58 static const VMStateDescription vmstate_chp_info = { 59 .name = "s390_chp_info", 60 .version_id = 1, 61 .minimum_version_id = 1, 62 .fields = (const VMStateField[]) { 63 VMSTATE_UINT8(in_use, ChpInfo), 64 VMSTATE_UINT8(type, ChpInfo), 65 VMSTATE_UINT8(is_virtual, ChpInfo), 66 VMSTATE_END_OF_LIST() 67 } 68 }; 69 70 typedef struct SubchSet { 71 SubchDev *sch[MAX_SCHID + 1]; 72 unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)]; 73 unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)]; 74 } SubchSet; 75 76 static const VMStateDescription vmstate_scsw = { 77 .name = "s390_scsw", 78 .version_id = 1, 79 .minimum_version_id = 1, 80 .fields = (const VMStateField[]) { 81 VMSTATE_UINT16(flags, SCSW), 82 VMSTATE_UINT16(ctrl, SCSW), 83 VMSTATE_UINT32(cpa, SCSW), 84 VMSTATE_UINT8(dstat, SCSW), 85 VMSTATE_UINT8(cstat, SCSW), 86 VMSTATE_UINT16(count, SCSW), 87 VMSTATE_END_OF_LIST() 88 } 89 }; 90 91 static const VMStateDescription vmstate_pmcw = { 92 .name = "s390_pmcw", 93 .version_id = 1, 94 .minimum_version_id = 1, 95 .fields = (const VMStateField[]) { 96 VMSTATE_UINT32(intparm, PMCW), 97 VMSTATE_UINT16(flags, PMCW), 98 VMSTATE_UINT16(devno, PMCW), 99 VMSTATE_UINT8(lpm, PMCW), 100 VMSTATE_UINT8(pnom, PMCW), 101 VMSTATE_UINT8(lpum, PMCW), 102 VMSTATE_UINT8(pim, PMCW), 103 VMSTATE_UINT16(mbi, PMCW), 104 VMSTATE_UINT8(pom, PMCW), 105 VMSTATE_UINT8(pam, PMCW), 106 VMSTATE_UINT8_ARRAY(chpid, PMCW, 8), 107 VMSTATE_UINT32(chars, PMCW), 108 VMSTATE_END_OF_LIST() 109 } 110 }; 111 112 static const VMStateDescription vmstate_schib = { 113 .name = "s390_schib", 114 .version_id = 1, 115 .minimum_version_id = 1, 116 .fields = (const VMStateField[]) { 117 VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW), 118 VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW), 119 VMSTATE_UINT64(mba, SCHIB), 120 VMSTATE_UINT8_ARRAY(mda, SCHIB, 4), 121 VMSTATE_END_OF_LIST() 122 } 123 }; 124 125 126 static const VMStateDescription vmstate_ccw1 = { 127 .name = "s390_ccw1", 128 .version_id = 1, 129 .minimum_version_id = 1, 130 .fields = (const VMStateField[]) { 131 VMSTATE_UINT8(cmd_code, CCW1), 132 VMSTATE_UINT8(flags, CCW1), 133 VMSTATE_UINT16(count, CCW1), 134 VMSTATE_UINT32(cda, CCW1), 135 VMSTATE_END_OF_LIST() 136 } 137 }; 138 139 static const VMStateDescription vmstate_ciw = { 140 .name = "s390_ciw", 141 .version_id = 1, 142 .minimum_version_id = 1, 143 .fields = (const VMStateField[]) { 144 VMSTATE_UINT8(type, CIW), 145 VMSTATE_UINT8(command, CIW), 146 VMSTATE_UINT16(count, CIW), 147 VMSTATE_END_OF_LIST() 148 } 149 }; 150 151 static const VMStateDescription vmstate_sense_id = { 152 .name = "s390_sense_id", 153 .version_id = 1, 154 .minimum_version_id = 1, 155 .fields = (const VMStateField[]) { 156 VMSTATE_UINT8(reserved, SenseId), 157 VMSTATE_UINT16(cu_type, SenseId), 158 VMSTATE_UINT8(cu_model, SenseId), 159 VMSTATE_UINT16(dev_type, SenseId), 160 VMSTATE_UINT8(dev_model, SenseId), 161 VMSTATE_UINT8(unused, SenseId), 162 VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW), 163 VMSTATE_END_OF_LIST() 164 } 165 }; 166 167 static const VMStateDescription vmstate_orb = { 168 .name = "s390_orb", 169 .version_id = 1, 170 .minimum_version_id = 1, 171 .fields = (const VMStateField[]) { 172 VMSTATE_UINT32(intparm, ORB), 173 VMSTATE_UINT16(ctrl0, ORB), 174 VMSTATE_UINT8(lpm, ORB), 175 VMSTATE_UINT8(ctrl1, ORB), 176 VMSTATE_UINT32(cpa, ORB), 177 VMSTATE_END_OF_LIST() 178 } 179 }; 180 181 static const VMStateDescription vmstate_schdev_orb = { 182 .name = "s390_subch_dev/orb", 183 .version_id = 1, 184 .minimum_version_id = 1, 185 .fields = (const VMStateField[]) { 186 VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB), 187 VMSTATE_END_OF_LIST() 188 } 189 }; 190 191 static int subch_dev_post_load(void *opaque, int version_id); 192 static int subch_dev_pre_save(void *opaque); 193 194 const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!" 195 " Likely reason: some sequences of plug and unplug can break" 196 " migration for machine versions prior to 2.7 (known design flaw)."; 197 198 const VMStateDescription vmstate_subch_dev = { 199 .name = "s390_subch_dev", 200 .version_id = 1, 201 .minimum_version_id = 1, 202 .post_load = subch_dev_post_load, 203 .pre_save = subch_dev_pre_save, 204 .fields = (const VMStateField[]) { 205 VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"), 206 VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"), 207 VMSTATE_UINT16(migrated_schid, SubchDev), 208 VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno), 209 VMSTATE_BOOL(thinint_active, SubchDev), 210 VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB), 211 VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32), 212 VMSTATE_UINT64(channel_prog, SubchDev), 213 VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1), 214 VMSTATE_BOOL(last_cmd_valid, SubchDev), 215 VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId), 216 VMSTATE_BOOL(ccw_fmt_1, SubchDev), 217 VMSTATE_UINT8(ccw_no_data_cnt, SubchDev), 218 VMSTATE_END_OF_LIST() 219 }, 220 .subsections = (const VMStateDescription * const []) { 221 &vmstate_schdev_orb, 222 NULL 223 } 224 }; 225 226 typedef struct IndAddrPtrTmp { 227 IndAddr **parent; 228 uint64_t addr; 229 int32_t len; 230 } IndAddrPtrTmp; 231 232 static int post_load_ind_addr(void *opaque, int version_id) 233 { 234 IndAddrPtrTmp *ptmp = opaque; 235 IndAddr **ind_addr = ptmp->parent; 236 237 if (ptmp->len != 0) { 238 *ind_addr = get_indicator(ptmp->addr, ptmp->len); 239 } else { 240 *ind_addr = NULL; 241 } 242 return 0; 243 } 244 245 static int pre_save_ind_addr(void *opaque) 246 { 247 IndAddrPtrTmp *ptmp = opaque; 248 IndAddr *ind_addr = *(ptmp->parent); 249 250 if (ind_addr != NULL) { 251 ptmp->len = ind_addr->len; 252 ptmp->addr = ind_addr->addr; 253 } else { 254 ptmp->len = 0; 255 ptmp->addr = 0L; 256 } 257 258 return 0; 259 } 260 261 static const VMStateDescription vmstate_ind_addr_tmp = { 262 .name = "s390_ind_addr_tmp", 263 .pre_save = pre_save_ind_addr, 264 .post_load = post_load_ind_addr, 265 266 .fields = (const VMStateField[]) { 267 VMSTATE_INT32(len, IndAddrPtrTmp), 268 VMSTATE_UINT64(addr, IndAddrPtrTmp), 269 VMSTATE_END_OF_LIST() 270 } 271 }; 272 273 const VMStateDescription vmstate_ind_addr = { 274 .name = "s390_ind_addr_tmp", 275 .fields = (const VMStateField[]) { 276 VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp), 277 VMSTATE_END_OF_LIST() 278 } 279 }; 280 281 typedef struct CssImage { 282 SubchSet *sch_set[MAX_SSID + 1]; 283 ChpInfo chpids[MAX_CHPID + 1]; 284 } CssImage; 285 286 static const VMStateDescription vmstate_css_img = { 287 .name = "s390_css_img", 288 .version_id = 1, 289 .minimum_version_id = 1, 290 .fields = (const VMStateField[]) { 291 /* Subchannel sets have no relevant state. */ 292 VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0, 293 vmstate_chp_info, ChpInfo), 294 VMSTATE_END_OF_LIST() 295 } 296 297 }; 298 299 typedef struct IoAdapter { 300 uint32_t id; 301 uint8_t type; 302 uint8_t isc; 303 uint8_t flags; 304 } IoAdapter; 305 306 typedef struct ChannelSubSys { 307 QTAILQ_HEAD(, CrwContainer) pending_crws; 308 bool sei_pending; 309 bool do_crw_mchk; 310 bool crws_lost; 311 uint8_t max_cssid; 312 uint8_t max_ssid; 313 bool chnmon_active; 314 uint64_t chnmon_area; 315 CssImage *css[MAX_CSSID + 1]; 316 uint8_t default_cssid; 317 /* don't migrate, see css_register_io_adapters */ 318 IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1]; 319 /* don't migrate, see get_indicator and IndAddrPtrTmp */ 320 QTAILQ_HEAD(, IndAddr) indicator_addresses; 321 } ChannelSubSys; 322 323 static const VMStateDescription vmstate_css = { 324 .name = "s390_css", 325 .version_id = 1, 326 .minimum_version_id = 1, 327 .fields = (const VMStateField[]) { 328 VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container, 329 CrwContainer, sibling), 330 VMSTATE_BOOL(sei_pending, ChannelSubSys), 331 VMSTATE_BOOL(do_crw_mchk, ChannelSubSys), 332 VMSTATE_BOOL(crws_lost, ChannelSubSys), 333 /* These were kind of migrated by virtio */ 334 VMSTATE_UINT8(max_cssid, ChannelSubSys), 335 VMSTATE_UINT8(max_ssid, ChannelSubSys), 336 VMSTATE_BOOL(chnmon_active, ChannelSubSys), 337 VMSTATE_UINT64(chnmon_area, ChannelSubSys), 338 VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(css, ChannelSubSys, MAX_CSSID + 1, 339 0, vmstate_css_img, CssImage), 340 VMSTATE_UINT8(default_cssid, ChannelSubSys), 341 VMSTATE_END_OF_LIST() 342 } 343 }; 344 345 static ChannelSubSys channel_subsys = { 346 .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws), 347 .do_crw_mchk = true, 348 .sei_pending = false, 349 .crws_lost = false, 350 .chnmon_active = false, 351 .indicator_addresses = 352 QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses), 353 }; 354 355 static int subch_dev_pre_save(void *opaque) 356 { 357 SubchDev *s = opaque; 358 359 /* Prepare remote_schid for save */ 360 s->migrated_schid = s->schid; 361 362 return 0; 363 } 364 365 static int subch_dev_post_load(void *opaque, int version_id) 366 { 367 368 SubchDev *s = opaque; 369 370 /* Re-assign the subchannel to remote_schid if necessary */ 371 if (s->migrated_schid != s->schid) { 372 if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) { 373 /* 374 * Cleanup the slot before moving to s->migrated_schid provided 375 * it still belongs to us, i.e. it was not changed by previous 376 * invocation of this function. 377 */ 378 css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL); 379 } 380 /* It's OK to re-assign without a prior de-assign. */ 381 s->schid = s->migrated_schid; 382 css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s); 383 } 384 385 return 0; 386 } 387 388 void css_register_vmstate(void) 389 { 390 vmstate_register(NULL, 0, &vmstate_css, &channel_subsys); 391 } 392 393 IndAddr *get_indicator(hwaddr ind_addr, int len) 394 { 395 IndAddr *indicator; 396 397 QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) { 398 if (indicator->addr == ind_addr) { 399 indicator->refcnt++; 400 return indicator; 401 } 402 } 403 indicator = g_new0(IndAddr, 1); 404 indicator->addr = ind_addr; 405 indicator->len = len; 406 indicator->refcnt = 1; 407 QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses, 408 indicator, sibling); 409 return indicator; 410 } 411 412 static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr, 413 bool do_map) 414 { 415 S390FLICState *fs = s390_get_flic(); 416 S390FLICStateClass *fsc = s390_get_flic_class(fs); 417 418 return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map); 419 } 420 421 void release_indicator(AdapterInfo *adapter, IndAddr *indicator) 422 { 423 assert(indicator->refcnt > 0); 424 indicator->refcnt--; 425 if (indicator->refcnt > 0) { 426 return; 427 } 428 QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling); 429 if (indicator->map) { 430 s390_io_adapter_map(adapter, indicator->map, false); 431 } 432 g_free(indicator); 433 } 434 435 int map_indicator(AdapterInfo *adapter, IndAddr *indicator) 436 { 437 int ret; 438 439 if (indicator->map) { 440 return 0; /* already mapped is not an error */ 441 } 442 indicator->map = indicator->addr; 443 ret = s390_io_adapter_map(adapter, indicator->map, true); 444 if ((ret != 0) && (ret != -ENOSYS)) { 445 goto out_err; 446 } 447 return 0; 448 449 out_err: 450 indicator->map = 0; 451 return ret; 452 } 453 454 int css_create_css_image(uint8_t cssid, bool default_image) 455 { 456 trace_css_new_image(cssid, default_image ? "(default)" : ""); 457 /* 255 is reserved */ 458 if (cssid == 255) { 459 return -EINVAL; 460 } 461 if (channel_subsys.css[cssid]) { 462 return -EBUSY; 463 } 464 channel_subsys.css[cssid] = g_new0(CssImage, 1); 465 if (default_image) { 466 channel_subsys.default_cssid = cssid; 467 } 468 return 0; 469 } 470 471 uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc) 472 { 473 if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC || 474 !channel_subsys.io_adapters[type][isc]) { 475 return -1; 476 } 477 478 return channel_subsys.io_adapters[type][isc]->id; 479 } 480 481 /** 482 * css_register_io_adapters: Register I/O adapters per ISC during init 483 * 484 * @swap: an indication if byte swap is needed. 485 * @maskable: an indication if the adapter is subject to the mask operation. 486 * @flags: further characteristics of the adapter. 487 * e.g. suppressible, an indication if the adapter is subject to AIS. 488 * @errp: location to store error information. 489 */ 490 void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable, 491 uint8_t flags, Error **errp) 492 { 493 uint32_t id; 494 int ret, isc; 495 IoAdapter *adapter; 496 S390FLICState *fs = s390_get_flic(); 497 S390FLICStateClass *fsc = s390_get_flic_class(fs); 498 499 /* 500 * Disallow multiple registrations for the same device type. 501 * Report an error if registering for an already registered type. 502 */ 503 if (channel_subsys.io_adapters[type][0]) { 504 error_setg(errp, "Adapters for type %d already registered", type); 505 } 506 507 for (isc = 0; isc <= MAX_ISC; isc++) { 508 id = (type << 3) | isc; 509 ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags); 510 if (ret == 0) { 511 adapter = g_new0(IoAdapter, 1); 512 adapter->id = id; 513 adapter->isc = isc; 514 adapter->type = type; 515 adapter->flags = flags; 516 channel_subsys.io_adapters[type][isc] = adapter; 517 } else { 518 error_setg_errno(errp, -ret, "Unexpected error %d when " 519 "registering adapter %d", ret, id); 520 break; 521 } 522 } 523 524 /* 525 * No need to free registered adapters in kvm: kvm will clean up 526 * when the machine goes away. 527 */ 528 if (ret) { 529 for (isc--; isc >= 0; isc--) { 530 g_free(channel_subsys.io_adapters[type][isc]); 531 channel_subsys.io_adapters[type][isc] = NULL; 532 } 533 } 534 535 } 536 537 static void css_clear_io_interrupt(uint16_t subchannel_id, 538 uint16_t subchannel_nr) 539 { 540 Error *err = NULL; 541 static bool no_clear_irq; 542 S390FLICState *fs = s390_get_flic(); 543 S390FLICStateClass *fsc = s390_get_flic_class(fs); 544 int r; 545 546 if (unlikely(no_clear_irq)) { 547 return; 548 } 549 r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr); 550 switch (r) { 551 case 0: 552 break; 553 case -ENOSYS: 554 no_clear_irq = true; 555 /* 556 * Ignore unavailability, as the user can't do anything 557 * about it anyway. 558 */ 559 break; 560 default: 561 error_setg_errno(&err, -r, "unexpected error condition"); 562 error_propagate(&error_abort, err); 563 } 564 } 565 566 static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid) 567 { 568 if (channel_subsys.max_cssid > 0) { 569 return (cssid << 8) | (1 << 3) | (ssid << 1) | 1; 570 } 571 return (ssid << 1) | 1; 572 } 573 574 uint16_t css_build_subchannel_id(SubchDev *sch) 575 { 576 return css_do_build_subchannel_id(sch->cssid, sch->ssid); 577 } 578 579 void css_inject_io_interrupt(SubchDev *sch) 580 { 581 uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; 582 583 trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, 584 sch->curr_status.pmcw.intparm, isc, ""); 585 s390_io_interrupt(css_build_subchannel_id(sch), 586 sch->schid, 587 sch->curr_status.pmcw.intparm, 588 isc << 27); 589 } 590 591 void css_conditional_io_interrupt(SubchDev *sch) 592 { 593 /* 594 * If the subchannel is not enabled, it is not made status pending 595 * (see PoP p. 16-17, "Status Control"). 596 */ 597 if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA)) { 598 return; 599 } 600 601 /* 602 * If the subchannel is not currently status pending, make it pending 603 * with alert status. 604 */ 605 if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) { 606 uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; 607 608 trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, 609 sch->curr_status.pmcw.intparm, isc, 610 "(unsolicited)"); 611 sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 612 sch->curr_status.scsw.ctrl |= 613 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 614 /* Inject an I/O interrupt. */ 615 s390_io_interrupt(css_build_subchannel_id(sch), 616 sch->schid, 617 sch->curr_status.pmcw.intparm, 618 isc << 27); 619 } 620 } 621 622 int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode) 623 { 624 CPUS390XState *env = &cpu->env; 625 S390FLICState *fs = s390_get_flic(); 626 S390FLICStateClass *fsc = s390_get_flic_class(fs); 627 int r; 628 629 if (env->psw.mask & PSW_MASK_PSTATE) { 630 r = -PGM_PRIVILEGED; 631 goto out; 632 } 633 634 trace_css_do_sic(mode, isc); 635 switch (mode) { 636 case SIC_IRQ_MODE_ALL: 637 case SIC_IRQ_MODE_SINGLE: 638 break; 639 default: 640 r = -PGM_OPERAND; 641 goto out; 642 } 643 644 r = fsc->modify_ais_mode(fs, isc, mode) ? -PGM_OPERATION : 0; 645 out: 646 return r; 647 } 648 649 void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc) 650 { 651 S390FLICState *fs = s390_get_flic(); 652 S390FLICStateClass *fsc = s390_get_flic_class(fs); 653 uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI; 654 IoAdapter *adapter = channel_subsys.io_adapters[type][isc]; 655 656 if (!adapter) { 657 return; 658 } 659 660 trace_css_adapter_interrupt(isc); 661 if (fs->ais_supported) { 662 if (fsc->inject_airq(fs, type, isc, adapter->flags)) { 663 error_report("Failed to inject airq with AIS supported"); 664 exit(1); 665 } 666 } else { 667 s390_io_interrupt(0, 0, 0, io_int_word); 668 } 669 } 670 671 static void sch_handle_clear_func(SubchDev *sch) 672 { 673 SCHIB *schib = &sch->curr_status; 674 int path; 675 676 /* Path management: In our simple css, we always choose the only path. */ 677 path = 0x80; 678 679 /* Reset values prior to 'issuing the clear signal'. */ 680 schib->pmcw.lpum = 0; 681 schib->pmcw.pom = 0xff; 682 schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; 683 684 /* We always 'attempt to issue the clear signal', and we always succeed. */ 685 sch->channel_prog = 0x0; 686 sch->last_cmd_valid = false; 687 schib->scsw.ctrl &= ~SCSW_ACTL_CLEAR_PEND; 688 schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; 689 690 schib->scsw.dstat = 0; 691 schib->scsw.cstat = 0; 692 schib->pmcw.lpum = path; 693 694 } 695 696 static void sch_handle_halt_func(SubchDev *sch) 697 { 698 SCHIB *schib = &sch->curr_status; 699 hwaddr curr_ccw = sch->channel_prog; 700 int path; 701 702 /* Path management: In our simple css, we always choose the only path. */ 703 path = 0x80; 704 705 /* We always 'attempt to issue the halt signal', and we always succeed. */ 706 sch->channel_prog = 0x0; 707 sch->last_cmd_valid = false; 708 schib->scsw.ctrl &= ~SCSW_ACTL_HALT_PEND; 709 schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND; 710 711 if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | 712 SCSW_ACTL_DEVICE_ACTIVE)) || 713 !((schib->scsw.ctrl & SCSW_ACTL_START_PEND) || 714 (schib->scsw.ctrl & SCSW_ACTL_SUSP))) { 715 schib->scsw.dstat = SCSW_DSTAT_DEVICE_END; 716 } 717 if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE | 718 SCSW_ACTL_DEVICE_ACTIVE)) || 719 (schib->scsw.ctrl & SCSW_ACTL_SUSP)) { 720 schib->scsw.cpa = curr_ccw + 8; 721 } 722 schib->scsw.cstat = 0; 723 schib->pmcw.lpum = path; 724 725 } 726 727 /* 728 * As the SenseId struct cannot be packed (would cause unaligned accesses), we 729 * have to copy the individual fields to an unstructured area using the correct 730 * layout (see SA22-7204-01 "Common I/O-Device Commands"). 731 */ 732 static void copy_sense_id_to_guest(uint8_t *dest, SenseId *src) 733 { 734 int i; 735 736 dest[0] = src->reserved; 737 stw_be_p(dest + 1, src->cu_type); 738 dest[3] = src->cu_model; 739 stw_be_p(dest + 4, src->dev_type); 740 dest[6] = src->dev_model; 741 dest[7] = src->unused; 742 for (i = 0; i < ARRAY_SIZE(src->ciw); i++) { 743 dest[8 + i * 4] = src->ciw[i].type; 744 dest[9 + i * 4] = src->ciw[i].command; 745 stw_be_p(dest + 10 + i * 4, src->ciw[i].count); 746 } 747 } 748 749 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1) 750 { 751 CCW0 tmp0; 752 CCW1 tmp1; 753 CCW1 ret; 754 755 if (fmt1) { 756 cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1)); 757 ret.cmd_code = tmp1.cmd_code; 758 ret.flags = tmp1.flags; 759 ret.count = be16_to_cpu(tmp1.count); 760 ret.cda = be32_to_cpu(tmp1.cda); 761 } else { 762 cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0)); 763 if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) { 764 ret.cmd_code = CCW_CMD_TIC; 765 ret.flags = 0; 766 ret.count = 0; 767 } else { 768 ret.cmd_code = tmp0.cmd_code; 769 ret.flags = tmp0.flags; 770 ret.count = be16_to_cpu(tmp0.count); 771 } 772 ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16); 773 } 774 return ret; 775 } 776 /** 777 * If out of bounds marks the stream broken. If broken returns -EINVAL, 778 * otherwise the requested length (may be zero) 779 */ 780 static inline int cds_check_len(CcwDataStream *cds, int len) 781 { 782 if (cds->at_byte + len > cds->count) { 783 cds->flags |= CDS_F_STREAM_BROKEN; 784 } 785 return cds->flags & CDS_F_STREAM_BROKEN ? -EINVAL : len; 786 } 787 788 static inline bool cds_ccw_addrs_ok(hwaddr addr, int len, bool ccw_fmt1) 789 { 790 return (addr + len) < (ccw_fmt1 ? (1UL << 31) : (1UL << 24)); 791 } 792 793 static int ccw_dstream_rw_noflags(CcwDataStream *cds, void *buff, int len, 794 CcwDataStreamOp op) 795 { 796 int ret; 797 798 ret = cds_check_len(cds, len); 799 if (ret <= 0) { 800 return ret; 801 } 802 if (!cds_ccw_addrs_ok(cds->cda, len, cds->flags & CDS_F_FMT)) { 803 return -EINVAL; /* channel program check */ 804 } 805 if (op == CDS_OP_A) { 806 goto incr; 807 } 808 if (!cds->do_skip) { 809 ret = address_space_rw(&address_space_memory, cds->cda, 810 MEMTXATTRS_UNSPECIFIED, buff, len, op); 811 } else { 812 ret = MEMTX_OK; 813 } 814 if (ret != MEMTX_OK) { 815 cds->flags |= CDS_F_STREAM_BROKEN; 816 return -EINVAL; 817 } 818 incr: 819 cds->at_byte += len; 820 cds->cda += len; 821 return 0; 822 } 823 824 /* returns values between 1 and bsz, where bsz is a power of 2 */ 825 static inline uint16_t ida_continuous_left(hwaddr cda, uint64_t bsz) 826 { 827 return bsz - (cda & (bsz - 1)); 828 } 829 830 static inline uint64_t ccw_ida_block_size(uint8_t flags) 831 { 832 if ((flags & CDS_F_C64) && !(flags & CDS_F_I2K)) { 833 return 1ULL << 12; 834 } 835 return 1ULL << 11; 836 } 837 838 static inline int ida_read_next_idaw(CcwDataStream *cds) 839 { 840 union {uint64_t fmt2; uint32_t fmt1; } idaw; 841 int ret; 842 hwaddr idaw_addr; 843 bool idaw_fmt2 = cds->flags & CDS_F_C64; 844 bool ccw_fmt1 = cds->flags & CDS_F_FMT; 845 846 if (idaw_fmt2) { 847 idaw_addr = cds->cda_orig + sizeof(idaw.fmt2) * cds->at_idaw; 848 if (idaw_addr & 0x07 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) { 849 return -EINVAL; /* channel program check */ 850 } 851 ret = address_space_read(&address_space_memory, idaw_addr, 852 MEMTXATTRS_UNSPECIFIED, &idaw.fmt2, 853 sizeof(idaw.fmt2)); 854 cds->cda = be64_to_cpu(idaw.fmt2); 855 } else { 856 idaw_addr = cds->cda_orig + sizeof(idaw.fmt1) * cds->at_idaw; 857 if (idaw_addr & 0x03 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) { 858 return -EINVAL; /* channel program check */ 859 } 860 ret = address_space_read(&address_space_memory, idaw_addr, 861 MEMTXATTRS_UNSPECIFIED, &idaw.fmt1, 862 sizeof(idaw.fmt1)); 863 cds->cda = be64_to_cpu(idaw.fmt1); 864 if (cds->cda & 0x80000000) { 865 return -EINVAL; /* channel program check */ 866 } 867 } 868 ++(cds->at_idaw); 869 if (ret != MEMTX_OK) { 870 /* assume inaccessible address */ 871 return -EINVAL; /* channel program check */ 872 } 873 return 0; 874 } 875 876 static int ccw_dstream_rw_ida(CcwDataStream *cds, void *buff, int len, 877 CcwDataStreamOp op) 878 { 879 uint64_t bsz = ccw_ida_block_size(cds->flags); 880 int ret = 0; 881 uint16_t cont_left, iter_len; 882 883 ret = cds_check_len(cds, len); 884 if (ret <= 0) { 885 return ret; 886 } 887 if (!cds->at_idaw) { 888 /* read first idaw */ 889 ret = ida_read_next_idaw(cds); 890 if (ret) { 891 goto err; 892 } 893 cont_left = ida_continuous_left(cds->cda, bsz); 894 } else { 895 cont_left = ida_continuous_left(cds->cda, bsz); 896 if (cont_left == bsz) { 897 ret = ida_read_next_idaw(cds); 898 if (ret) { 899 goto err; 900 } 901 if (cds->cda & (bsz - 1)) { 902 ret = -EINVAL; /* channel program check */ 903 goto err; 904 } 905 } 906 } 907 do { 908 iter_len = MIN(len, cont_left); 909 if (op != CDS_OP_A) { 910 if (!cds->do_skip) { 911 ret = address_space_rw(&address_space_memory, cds->cda, 912 MEMTXATTRS_UNSPECIFIED, buff, iter_len, 913 op); 914 } else { 915 ret = MEMTX_OK; 916 } 917 if (ret != MEMTX_OK) { 918 /* assume inaccessible address */ 919 ret = -EINVAL; /* channel program check */ 920 goto err; 921 } 922 } 923 cds->at_byte += iter_len; 924 cds->cda += iter_len; 925 len -= iter_len; 926 if (!len) { 927 break; 928 } 929 ret = ida_read_next_idaw(cds); 930 if (ret) { 931 goto err; 932 } 933 cont_left = bsz; 934 } while (true); 935 return ret; 936 err: 937 cds->flags |= CDS_F_STREAM_BROKEN; 938 return ret; 939 } 940 941 void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb) 942 { 943 /* 944 * We don't support MIDA (an optional facility) yet and we 945 * catch this earlier. Just for expressing the precondition. 946 */ 947 g_assert(!(orb->ctrl1 & ORB_CTRL1_MASK_MIDAW)); 948 cds->flags = (orb->ctrl0 & ORB_CTRL0_MASK_I2K ? CDS_F_I2K : 0) | 949 (orb->ctrl0 & ORB_CTRL0_MASK_C64 ? CDS_F_C64 : 0) | 950 (orb->ctrl0 & ORB_CTRL0_MASK_FMT ? CDS_F_FMT : 0) | 951 (ccw->flags & CCW_FLAG_IDA ? CDS_F_IDA : 0); 952 953 cds->count = ccw->count; 954 cds->cda_orig = ccw->cda; 955 /* skip is only effective for read, read backwards, or sense commands */ 956 cds->do_skip = (ccw->flags & CCW_FLAG_SKIP) && 957 ((ccw->cmd_code & 0x0f) == CCW_CMD_BASIC_SENSE || 958 (ccw->cmd_code & 0x03) == 0x02 /* read */ || 959 (ccw->cmd_code & 0x0f) == 0x0c /* read backwards */); 960 ccw_dstream_rewind(cds); 961 if (!(cds->flags & CDS_F_IDA)) { 962 cds->op_handler = ccw_dstream_rw_noflags; 963 } else { 964 cds->op_handler = ccw_dstream_rw_ida; 965 } 966 } 967 968 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr, 969 bool suspend_allowed) 970 { 971 int ret; 972 bool check_len; 973 int len; 974 CCW1 ccw; 975 976 if (!ccw_addr) { 977 return -EINVAL; /* channel-program check */ 978 } 979 /* Check doubleword aligned and 31 or 24 (fmt 0) bit addressable. */ 980 if (ccw_addr & (sch->ccw_fmt_1 ? 0x80000007 : 0xff000007)) { 981 return -EINVAL; 982 } 983 984 /* Translate everything to format-1 ccws - the information is the same. */ 985 ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1); 986 987 /* Check for invalid command codes. */ 988 if ((ccw.cmd_code & 0x0f) == 0) { 989 return -EINVAL; 990 } 991 if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) && 992 ((ccw.cmd_code & 0xf0) != 0)) { 993 return -EINVAL; 994 } 995 if (!sch->ccw_fmt_1 && (ccw.count == 0) && 996 (ccw.cmd_code != CCW_CMD_TIC)) { 997 return -EINVAL; 998 } 999 1000 /* We don't support MIDA. */ 1001 if (ccw.flags & CCW_FLAG_MIDA) { 1002 return -EINVAL; 1003 } 1004 1005 if (ccw.flags & CCW_FLAG_SUSPEND) { 1006 return suspend_allowed ? -EINPROGRESS : -EINVAL; 1007 } 1008 1009 check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); 1010 1011 if (!ccw.cda) { 1012 if (sch->ccw_no_data_cnt == 255) { 1013 return -EINVAL; 1014 } 1015 sch->ccw_no_data_cnt++; 1016 } 1017 1018 /* Look at the command. */ 1019 ccw_dstream_init(&sch->cds, &ccw, &(sch->orb)); 1020 switch (ccw.cmd_code) { 1021 case CCW_CMD_NOOP: 1022 /* Nothing to do. */ 1023 ret = 0; 1024 break; 1025 case CCW_CMD_BASIC_SENSE: 1026 if (check_len) { 1027 if (ccw.count != sizeof(sch->sense_data)) { 1028 ret = -EINVAL; 1029 break; 1030 } 1031 } 1032 len = MIN(ccw.count, sizeof(sch->sense_data)); 1033 ret = ccw_dstream_write_buf(&sch->cds, sch->sense_data, len); 1034 sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds); 1035 if (!ret) { 1036 memset(sch->sense_data, 0, sizeof(sch->sense_data)); 1037 } 1038 break; 1039 case CCW_CMD_SENSE_ID: 1040 { 1041 /* According to SA22-7204-01, Sense-ID can store up to 256 bytes */ 1042 uint8_t sense_id[256]; 1043 1044 copy_sense_id_to_guest(sense_id, &sch->id); 1045 /* Sense ID information is device specific. */ 1046 if (check_len) { 1047 if (ccw.count != sizeof(sense_id)) { 1048 ret = -EINVAL; 1049 break; 1050 } 1051 } 1052 len = MIN(ccw.count, sizeof(sense_id)); 1053 /* 1054 * Only indicate 0xff in the first sense byte if we actually 1055 * have enough place to store at least bytes 0-3. 1056 */ 1057 if (len >= 4) { 1058 sense_id[0] = 0xff; 1059 } else { 1060 sense_id[0] = 0; 1061 } 1062 ret = ccw_dstream_write_buf(&sch->cds, sense_id, len); 1063 if (!ret) { 1064 sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds); 1065 } 1066 break; 1067 } 1068 case CCW_CMD_TIC: 1069 if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) { 1070 ret = -EINVAL; 1071 break; 1072 } 1073 if (ccw.flags || ccw.count) { 1074 /* We have already sanitized these if converted from fmt 0. */ 1075 ret = -EINVAL; 1076 break; 1077 } 1078 sch->channel_prog = ccw.cda; 1079 ret = -EAGAIN; 1080 break; 1081 default: 1082 if (sch->ccw_cb) { 1083 /* Handle device specific commands. */ 1084 ret = sch->ccw_cb(sch, ccw); 1085 } else { 1086 ret = -ENOSYS; 1087 } 1088 break; 1089 } 1090 sch->last_cmd = ccw; 1091 sch->last_cmd_valid = true; 1092 if (ret == 0) { 1093 if (ccw.flags & CCW_FLAG_CC) { 1094 sch->channel_prog += 8; 1095 ret = -EAGAIN; 1096 } 1097 } 1098 1099 return ret; 1100 } 1101 1102 static void sch_handle_start_func_virtual(SubchDev *sch) 1103 { 1104 SCHIB *schib = &sch->curr_status; 1105 int path; 1106 int ret; 1107 bool suspend_allowed; 1108 1109 /* Path management: In our simple css, we always choose the only path. */ 1110 path = 0x80; 1111 1112 if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { 1113 /* Start Function triggered via ssch, i.e. we have an ORB */ 1114 ORB *orb = &sch->orb; 1115 schib->scsw.cstat = 0; 1116 schib->scsw.dstat = 0; 1117 /* Look at the orb and try to execute the channel program. */ 1118 schib->pmcw.intparm = orb->intparm; 1119 if (!(orb->lpm & path)) { 1120 /* Generate a deferred cc 3 condition. */ 1121 schib->scsw.flags |= SCSW_FLAGS_MASK_CC; 1122 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 1123 schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); 1124 return; 1125 } 1126 sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT); 1127 schib->scsw.flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0; 1128 sch->ccw_no_data_cnt = 0; 1129 suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND); 1130 } else { 1131 /* Start Function resumed via rsch */ 1132 schib->scsw.ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); 1133 /* The channel program had been suspended before. */ 1134 suspend_allowed = true; 1135 } 1136 sch->last_cmd_valid = false; 1137 do { 1138 ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed); 1139 switch (ret) { 1140 case -EAGAIN: 1141 /* ccw chain, continue processing */ 1142 break; 1143 case 0: 1144 /* success */ 1145 schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; 1146 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 1147 schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 1148 SCSW_STCTL_STATUS_PEND; 1149 schib->scsw.dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END; 1150 schib->scsw.cpa = sch->channel_prog + 8; 1151 break; 1152 case -EIO: 1153 /* I/O errors, status depends on specific devices */ 1154 break; 1155 case -ENOSYS: 1156 /* unsupported command, generate unit check (command reject) */ 1157 schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; 1158 schib->scsw.dstat = SCSW_DSTAT_UNIT_CHECK; 1159 /* Set sense bit 0 in ecw0. */ 1160 sch->sense_data[0] = 0x80; 1161 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 1162 schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 1163 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 1164 schib->scsw.cpa = sch->channel_prog + 8; 1165 break; 1166 case -EINPROGRESS: 1167 /* channel program has been suspended */ 1168 schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; 1169 schib->scsw.ctrl |= SCSW_ACTL_SUSP; 1170 break; 1171 default: 1172 /* error, generate channel program check */ 1173 schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND; 1174 schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK; 1175 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 1176 schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 1177 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 1178 schib->scsw.cpa = sch->channel_prog + 8; 1179 break; 1180 } 1181 } while (ret == -EAGAIN); 1182 1183 } 1184 1185 static IOInstEnding sch_handle_halt_func_passthrough(SubchDev *sch) 1186 { 1187 int ret; 1188 1189 ret = s390_ccw_halt(sch); 1190 if (ret == -ENOSYS) { 1191 sch_handle_halt_func(sch); 1192 return IOINST_CC_EXPECTED; 1193 } 1194 /* 1195 * Some conditions may have been detected prior to starting the halt 1196 * function; map them to the correct cc. 1197 * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really 1198 * anything else we can do.) 1199 */ 1200 switch (ret) { 1201 case -EBUSY: 1202 return IOINST_CC_BUSY; 1203 case -ENODEV: 1204 case -EACCES: 1205 return IOINST_CC_NOT_OPERATIONAL; 1206 default: 1207 return IOINST_CC_EXPECTED; 1208 } 1209 } 1210 1211 static IOInstEnding sch_handle_clear_func_passthrough(SubchDev *sch) 1212 { 1213 int ret; 1214 1215 ret = s390_ccw_clear(sch); 1216 if (ret == -ENOSYS) { 1217 sch_handle_clear_func(sch); 1218 return IOINST_CC_EXPECTED; 1219 } 1220 /* 1221 * Some conditions may have been detected prior to starting the clear 1222 * function; map them to the correct cc. 1223 * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really 1224 * anything else we can do.) 1225 */ 1226 switch (ret) { 1227 case -ENODEV: 1228 case -EACCES: 1229 return IOINST_CC_NOT_OPERATIONAL; 1230 default: 1231 return IOINST_CC_EXPECTED; 1232 } 1233 } 1234 1235 static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch) 1236 { 1237 SCHIB *schib = &sch->curr_status; 1238 ORB *orb = &sch->orb; 1239 if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) { 1240 assert(orb != NULL); 1241 schib->pmcw.intparm = orb->intparm; 1242 } 1243 return s390_ccw_cmd_request(sch); 1244 } 1245 1246 /* 1247 * On real machines, this would run asynchronously to the main vcpus. 1248 * We might want to make some parts of the ssch handling (interpreting 1249 * read/writes) asynchronous later on if we start supporting more than 1250 * our current very simple devices. 1251 */ 1252 IOInstEnding do_subchannel_work_virtual(SubchDev *sch) 1253 { 1254 SCHIB *schib = &sch->curr_status; 1255 1256 if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { 1257 sch_handle_clear_func(sch); 1258 } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { 1259 sch_handle_halt_func(sch); 1260 } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { 1261 /* Triggered by both ssch and rsch. */ 1262 sch_handle_start_func_virtual(sch); 1263 } 1264 css_inject_io_interrupt(sch); 1265 /* inst must succeed if this func is called */ 1266 return IOINST_CC_EXPECTED; 1267 } 1268 1269 IOInstEnding do_subchannel_work_passthrough(SubchDev *sch) 1270 { 1271 SCHIB *schib = &sch->curr_status; 1272 1273 if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) { 1274 return sch_handle_clear_func_passthrough(sch); 1275 } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) { 1276 return sch_handle_halt_func_passthrough(sch); 1277 } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) { 1278 return sch_handle_start_func_passthrough(sch); 1279 } 1280 return IOINST_CC_EXPECTED; 1281 } 1282 1283 static IOInstEnding do_subchannel_work(SubchDev *sch) 1284 { 1285 if (!sch->do_subchannel_work) { 1286 return IOINST_CC_STATUS_PRESENT; 1287 } 1288 g_assert(sch->curr_status.scsw.ctrl & SCSW_CTRL_MASK_FCTL); 1289 return sch->do_subchannel_work(sch); 1290 } 1291 1292 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src) 1293 { 1294 int i; 1295 1296 dest->intparm = cpu_to_be32(src->intparm); 1297 dest->flags = cpu_to_be16(src->flags); 1298 dest->devno = cpu_to_be16(src->devno); 1299 dest->lpm = src->lpm; 1300 dest->pnom = src->pnom; 1301 dest->lpum = src->lpum; 1302 dest->pim = src->pim; 1303 dest->mbi = cpu_to_be16(src->mbi); 1304 dest->pom = src->pom; 1305 dest->pam = src->pam; 1306 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { 1307 dest->chpid[i] = src->chpid[i]; 1308 } 1309 dest->chars = cpu_to_be32(src->chars); 1310 } 1311 1312 void copy_scsw_to_guest(SCSW *dest, const SCSW *src) 1313 { 1314 dest->flags = cpu_to_be16(src->flags); 1315 dest->ctrl = cpu_to_be16(src->ctrl); 1316 dest->cpa = cpu_to_be32(src->cpa); 1317 dest->dstat = src->dstat; 1318 dest->cstat = src->cstat; 1319 dest->count = cpu_to_be16(src->count); 1320 } 1321 1322 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src) 1323 { 1324 int i; 1325 /* 1326 * We copy the PMCW and SCSW in and out of local variables to 1327 * avoid taking the address of members of a packed struct. 1328 */ 1329 PMCW src_pmcw, dest_pmcw; 1330 SCSW src_scsw, dest_scsw; 1331 1332 src_pmcw = src->pmcw; 1333 copy_pmcw_to_guest(&dest_pmcw, &src_pmcw); 1334 dest->pmcw = dest_pmcw; 1335 src_scsw = src->scsw; 1336 copy_scsw_to_guest(&dest_scsw, &src_scsw); 1337 dest->scsw = dest_scsw; 1338 dest->mba = cpu_to_be64(src->mba); 1339 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { 1340 dest->mda[i] = src->mda[i]; 1341 } 1342 } 1343 1344 void copy_esw_to_guest(ESW *dest, const ESW *src) 1345 { 1346 dest->word0 = cpu_to_be32(src->word0); 1347 dest->erw = cpu_to_be32(src->erw); 1348 dest->word2 = cpu_to_be64(src->word2); 1349 dest->word4 = cpu_to_be32(src->word4); 1350 } 1351 1352 IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib) 1353 { 1354 int ret; 1355 1356 /* 1357 * For some subchannels, we may want to update parts of 1358 * the schib (e.g., update path masks from the host device 1359 * for passthrough subchannels). 1360 */ 1361 ret = s390_ccw_store(sch); 1362 1363 /* Use current status. */ 1364 copy_schib_to_guest(schib, &sch->curr_status); 1365 return ret; 1366 } 1367 1368 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src) 1369 { 1370 int i; 1371 1372 dest->intparm = be32_to_cpu(src->intparm); 1373 dest->flags = be16_to_cpu(src->flags); 1374 dest->devno = be16_to_cpu(src->devno); 1375 dest->lpm = src->lpm; 1376 dest->pnom = src->pnom; 1377 dest->lpum = src->lpum; 1378 dest->pim = src->pim; 1379 dest->mbi = be16_to_cpu(src->mbi); 1380 dest->pom = src->pom; 1381 dest->pam = src->pam; 1382 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { 1383 dest->chpid[i] = src->chpid[i]; 1384 } 1385 dest->chars = be32_to_cpu(src->chars); 1386 } 1387 1388 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src) 1389 { 1390 dest->flags = be16_to_cpu(src->flags); 1391 dest->ctrl = be16_to_cpu(src->ctrl); 1392 dest->cpa = be32_to_cpu(src->cpa); 1393 dest->dstat = src->dstat; 1394 dest->cstat = src->cstat; 1395 dest->count = be16_to_cpu(src->count); 1396 } 1397 1398 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src) 1399 { 1400 int i; 1401 /* 1402 * We copy the PMCW and SCSW in and out of local variables to 1403 * avoid taking the address of members of a packed struct. 1404 */ 1405 PMCW src_pmcw, dest_pmcw; 1406 SCSW src_scsw, dest_scsw; 1407 1408 src_pmcw = src->pmcw; 1409 copy_pmcw_from_guest(&dest_pmcw, &src_pmcw); 1410 dest->pmcw = dest_pmcw; 1411 src_scsw = src->scsw; 1412 copy_scsw_from_guest(&dest_scsw, &src_scsw); 1413 dest->scsw = dest_scsw; 1414 dest->mba = be64_to_cpu(src->mba); 1415 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { 1416 dest->mda[i] = src->mda[i]; 1417 } 1418 } 1419 1420 IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *orig_schib) 1421 { 1422 SCHIB *schib = &sch->curr_status; 1423 uint16_t oldflags; 1424 SCHIB schib_copy; 1425 1426 if (!(schib->pmcw.flags & PMCW_FLAGS_MASK_DNV)) { 1427 return IOINST_CC_EXPECTED; 1428 } 1429 1430 if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { 1431 return IOINST_CC_STATUS_PRESENT; 1432 } 1433 1434 if (schib->scsw.ctrl & 1435 (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) { 1436 return IOINST_CC_BUSY; 1437 } 1438 1439 copy_schib_from_guest(&schib_copy, orig_schib); 1440 /* Only update the program-modifiable fields. */ 1441 schib->pmcw.intparm = schib_copy.pmcw.intparm; 1442 oldflags = schib->pmcw.flags; 1443 schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 1444 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 1445 PMCW_FLAGS_MASK_MP); 1446 schib->pmcw.flags |= schib_copy.pmcw.flags & 1447 (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 1448 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 1449 PMCW_FLAGS_MASK_MP); 1450 schib->pmcw.lpm = schib_copy.pmcw.lpm; 1451 schib->pmcw.mbi = schib_copy.pmcw.mbi; 1452 schib->pmcw.pom = schib_copy.pmcw.pom; 1453 schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); 1454 schib->pmcw.chars |= schib_copy.pmcw.chars & 1455 (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); 1456 schib->mba = schib_copy.mba; 1457 1458 /* Has the channel been disabled? */ 1459 if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0 1460 && (schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) == 0) { 1461 sch->disable_cb(sch); 1462 } 1463 return IOINST_CC_EXPECTED; 1464 } 1465 1466 IOInstEnding css_do_xsch(SubchDev *sch) 1467 { 1468 SCHIB *schib = &sch->curr_status; 1469 1470 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1471 return IOINST_CC_NOT_OPERATIONAL; 1472 } 1473 1474 if (schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) { 1475 return IOINST_CC_STATUS_PRESENT; 1476 } 1477 1478 if (!(schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) || 1479 ((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || 1480 (!(schib->scsw.ctrl & 1481 (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) || 1482 (schib->scsw.ctrl & SCSW_ACTL_SUBCH_ACTIVE)) { 1483 return IOINST_CC_BUSY; 1484 } 1485 1486 /* Cancel the current operation. */ 1487 schib->scsw.ctrl &= ~(SCSW_FCTL_START_FUNC | 1488 SCSW_ACTL_RESUME_PEND | 1489 SCSW_ACTL_START_PEND | 1490 SCSW_ACTL_SUSP); 1491 sch->channel_prog = 0x0; 1492 sch->last_cmd_valid = false; 1493 schib->scsw.dstat = 0; 1494 schib->scsw.cstat = 0; 1495 return IOINST_CC_EXPECTED; 1496 } 1497 1498 IOInstEnding css_do_csch(SubchDev *sch) 1499 { 1500 SCHIB *schib = &sch->curr_status; 1501 uint16_t old_scsw_ctrl; 1502 IOInstEnding ccode; 1503 1504 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1505 return IOINST_CC_NOT_OPERATIONAL; 1506 } 1507 1508 /* 1509 * Save the current scsw.ctrl in case CSCH fails and we need 1510 * to revert the scsw to the status quo ante. 1511 */ 1512 old_scsw_ctrl = schib->scsw.ctrl; 1513 1514 /* Trigger the clear function. */ 1515 schib->scsw.ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); 1516 schib->scsw.ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; 1517 1518 ccode = do_subchannel_work(sch); 1519 1520 if (ccode != IOINST_CC_EXPECTED) { 1521 schib->scsw.ctrl = old_scsw_ctrl; 1522 } 1523 1524 return ccode; 1525 } 1526 1527 IOInstEnding css_do_hsch(SubchDev *sch) 1528 { 1529 SCHIB *schib = &sch->curr_status; 1530 uint16_t old_scsw_ctrl; 1531 IOInstEnding ccode; 1532 1533 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1534 return IOINST_CC_NOT_OPERATIONAL; 1535 } 1536 1537 if (((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) || 1538 (schib->scsw.ctrl & (SCSW_STCTL_PRIMARY | 1539 SCSW_STCTL_SECONDARY | 1540 SCSW_STCTL_ALERT))) { 1541 return IOINST_CC_STATUS_PRESENT; 1542 } 1543 1544 if (schib->scsw.ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 1545 return IOINST_CC_BUSY; 1546 } 1547 1548 /* 1549 * Save the current scsw.ctrl in case HSCH fails and we need 1550 * to revert the scsw to the status quo ante. 1551 */ 1552 old_scsw_ctrl = schib->scsw.ctrl; 1553 1554 /* Trigger the halt function. */ 1555 schib->scsw.ctrl |= SCSW_FCTL_HALT_FUNC; 1556 schib->scsw.ctrl &= ~SCSW_FCTL_START_FUNC; 1557 if (((schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL) == 1558 (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) && 1559 ((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == 1560 SCSW_STCTL_INTERMEDIATE)) { 1561 schib->scsw.ctrl &= ~SCSW_STCTL_STATUS_PEND; 1562 } 1563 schib->scsw.ctrl |= SCSW_ACTL_HALT_PEND; 1564 1565 ccode = do_subchannel_work(sch); 1566 1567 if (ccode != IOINST_CC_EXPECTED) { 1568 schib->scsw.ctrl = old_scsw_ctrl; 1569 } 1570 1571 return ccode; 1572 } 1573 1574 static void css_update_chnmon(SubchDev *sch) 1575 { 1576 if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) { 1577 /* Not active. */ 1578 return; 1579 } 1580 /* The counter is conveniently located at the beginning of the struct. */ 1581 if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) { 1582 /* Format 1, per-subchannel area. */ 1583 uint32_t count; 1584 1585 count = address_space_ldl(&address_space_memory, 1586 sch->curr_status.mba, 1587 MEMTXATTRS_UNSPECIFIED, 1588 NULL); 1589 count++; 1590 address_space_stl(&address_space_memory, sch->curr_status.mba, count, 1591 MEMTXATTRS_UNSPECIFIED, NULL); 1592 } else { 1593 /* Format 0, global area. */ 1594 uint32_t offset; 1595 uint16_t count; 1596 1597 offset = sch->curr_status.pmcw.mbi << 5; 1598 count = address_space_lduw(&address_space_memory, 1599 channel_subsys.chnmon_area + offset, 1600 MEMTXATTRS_UNSPECIFIED, 1601 NULL); 1602 count++; 1603 address_space_stw(&address_space_memory, 1604 channel_subsys.chnmon_area + offset, count, 1605 MEMTXATTRS_UNSPECIFIED, NULL); 1606 } 1607 } 1608 1609 IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb) 1610 { 1611 SCHIB *schib = &sch->curr_status; 1612 uint16_t old_scsw_ctrl, old_scsw_flags; 1613 IOInstEnding ccode; 1614 1615 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1616 return IOINST_CC_NOT_OPERATIONAL; 1617 } 1618 1619 if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { 1620 return IOINST_CC_STATUS_PRESENT; 1621 } 1622 1623 if (schib->scsw.ctrl & (SCSW_FCTL_START_FUNC | 1624 SCSW_FCTL_HALT_FUNC | 1625 SCSW_FCTL_CLEAR_FUNC)) { 1626 return IOINST_CC_BUSY; 1627 } 1628 1629 /* If monitoring is active, update counter. */ 1630 if (channel_subsys.chnmon_active) { 1631 css_update_chnmon(sch); 1632 } 1633 sch->orb = *orb; 1634 sch->channel_prog = orb->cpa; 1635 1636 /* 1637 * Save the current scsw.ctrl and scsw.flags in case SSCH fails and we need 1638 * to revert the scsw to the status quo ante. 1639 */ 1640 old_scsw_ctrl = schib->scsw.ctrl; 1641 old_scsw_flags = schib->scsw.flags; 1642 1643 /* Trigger the start function. */ 1644 schib->scsw.ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); 1645 schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; 1646 1647 ccode = do_subchannel_work(sch); 1648 1649 if (ccode != IOINST_CC_EXPECTED) { 1650 schib->scsw.ctrl = old_scsw_ctrl; 1651 schib->scsw.flags = old_scsw_flags; 1652 } 1653 1654 return ccode; 1655 } 1656 1657 static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw, 1658 int *irb_len) 1659 { 1660 int i; 1661 uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL; 1662 uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL; 1663 1664 copy_scsw_to_guest(&dest->scsw, &src->scsw); 1665 1666 copy_esw_to_guest(&dest->esw, &src->esw); 1667 1668 for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) { 1669 dest->ecw[i] = cpu_to_be32(src->ecw[i]); 1670 } 1671 *irb_len = sizeof(*dest) - sizeof(dest->emw); 1672 1673 /* extended measurements enabled? */ 1674 if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) || 1675 !(pmcw->flags & PMCW_FLAGS_MASK_TF) || 1676 !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) { 1677 return; 1678 } 1679 /* extended measurements pending? */ 1680 if (!(stctl & SCSW_STCTL_STATUS_PEND)) { 1681 return; 1682 } 1683 if ((stctl & SCSW_STCTL_PRIMARY) || 1684 (stctl == SCSW_STCTL_SECONDARY) || 1685 ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) { 1686 for (i = 0; i < ARRAY_SIZE(dest->emw); i++) { 1687 dest->emw[i] = cpu_to_be32(src->emw[i]); 1688 } 1689 } 1690 *irb_len = sizeof(*dest); 1691 } 1692 1693 static void build_irb_sense_data(SubchDev *sch, IRB *irb) 1694 { 1695 int i; 1696 1697 /* Attention: sense_data is already BE! */ 1698 memcpy(irb->ecw, sch->sense_data, sizeof(sch->sense_data)); 1699 for (i = 0; i < ARRAY_SIZE(irb->ecw); i++) { 1700 irb->ecw[i] = be32_to_cpu(irb->ecw[i]); 1701 } 1702 } 1703 1704 void build_irb_passthrough(SubchDev *sch, IRB *irb) 1705 { 1706 /* Copy ESW from hardware */ 1707 irb->esw = sch->esw; 1708 1709 /* 1710 * If (irb->esw.erw & ESW_ERW_SENSE) is true, then the contents 1711 * of the ECW is sense data. If false, then it is model-dependent 1712 * information. Either way, copy it into the IRB for the guest to 1713 * read/decide what to do with. 1714 */ 1715 build_irb_sense_data(sch, irb); 1716 } 1717 1718 void build_irb_virtual(SubchDev *sch, IRB *irb) 1719 { 1720 SCHIB *schib = &sch->curr_status; 1721 uint16_t stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; 1722 1723 if (stctl & SCSW_STCTL_STATUS_PEND) { 1724 if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK | 1725 SCSW_CSTAT_CHN_CTRL_CHK | 1726 SCSW_CSTAT_INTF_CTRL_CHK)) { 1727 irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF; 1728 irb->esw.word0 = 0x04804000; 1729 } else { 1730 irb->esw.word0 = 0x00800000; 1731 } 1732 /* If a unit check is pending, copy sense data. */ 1733 if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) && 1734 (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) { 1735 irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; 1736 build_irb_sense_data(sch, irb); 1737 irb->esw.erw = ESW_ERW_SENSE | (sizeof(sch->sense_data) << 8); 1738 } 1739 } 1740 } 1741 1742 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) 1743 { 1744 SCHIB *schib = &sch->curr_status; 1745 PMCW p; 1746 uint16_t stctl; 1747 IRB irb; 1748 1749 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1750 return 3; 1751 } 1752 1753 stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; 1754 1755 /* Prepare the irb for the guest. */ 1756 memset(&irb, 0, sizeof(IRB)); 1757 1758 /* Copy scsw from current status. */ 1759 irb.scsw = schib->scsw; 1760 1761 /* Build other IRB data, if necessary */ 1762 if (sch->irb_cb) { 1763 sch->irb_cb(sch, &irb); 1764 } 1765 1766 /* Store the irb to the guest. */ 1767 p = schib->pmcw; 1768 copy_irb_to_guest(target_irb, &irb, &p, irb_len); 1769 1770 return ((stctl & SCSW_STCTL_STATUS_PEND) == 0); 1771 } 1772 1773 void css_do_tsch_update_subch(SubchDev *sch) 1774 { 1775 SCHIB *schib = &sch->curr_status; 1776 uint16_t stctl; 1777 uint16_t fctl; 1778 uint16_t actl; 1779 1780 stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL; 1781 fctl = schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL; 1782 actl = schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL; 1783 1784 /* Clear conditions on subchannel, if applicable. */ 1785 if (stctl & SCSW_STCTL_STATUS_PEND) { 1786 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 1787 if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || 1788 ((fctl & SCSW_FCTL_HALT_FUNC) && 1789 (actl & SCSW_ACTL_SUSP))) { 1790 schib->scsw.ctrl &= ~SCSW_CTRL_MASK_FCTL; 1791 } 1792 if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { 1793 schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; 1794 schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | 1795 SCSW_ACTL_START_PEND | 1796 SCSW_ACTL_HALT_PEND | 1797 SCSW_ACTL_CLEAR_PEND | 1798 SCSW_ACTL_SUSP); 1799 } else { 1800 if ((actl & SCSW_ACTL_SUSP) && 1801 (fctl & SCSW_FCTL_START_FUNC)) { 1802 schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO; 1803 if (fctl & SCSW_FCTL_HALT_FUNC) { 1804 schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND | 1805 SCSW_ACTL_START_PEND | 1806 SCSW_ACTL_HALT_PEND | 1807 SCSW_ACTL_CLEAR_PEND | 1808 SCSW_ACTL_SUSP); 1809 } else { 1810 schib->scsw.ctrl &= ~SCSW_ACTL_RESUME_PEND; 1811 } 1812 } 1813 } 1814 /* Clear pending sense data. */ 1815 if (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE) { 1816 memset(sch->sense_data, 0 , sizeof(sch->sense_data)); 1817 } 1818 } 1819 } 1820 1821 static void copy_crw_to_guest(CRW *dest, const CRW *src) 1822 { 1823 dest->flags = cpu_to_be16(src->flags); 1824 dest->rsid = cpu_to_be16(src->rsid); 1825 } 1826 1827 int css_do_stcrw(CRW *crw) 1828 { 1829 CrwContainer *crw_cont; 1830 int ret; 1831 1832 crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws); 1833 if (crw_cont) { 1834 QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling); 1835 copy_crw_to_guest(crw, &crw_cont->crw); 1836 g_free(crw_cont); 1837 ret = 0; 1838 } else { 1839 /* List was empty, turn crw machine checks on again. */ 1840 memset(crw, 0, sizeof(*crw)); 1841 channel_subsys.do_crw_mchk = true; 1842 ret = 1; 1843 } 1844 1845 return ret; 1846 } 1847 1848 static void copy_crw_from_guest(CRW *dest, const CRW *src) 1849 { 1850 dest->flags = be16_to_cpu(src->flags); 1851 dest->rsid = be16_to_cpu(src->rsid); 1852 } 1853 1854 void css_undo_stcrw(CRW *crw) 1855 { 1856 CrwContainer *crw_cont; 1857 1858 crw_cont = g_try_new0(CrwContainer, 1); 1859 if (!crw_cont) { 1860 channel_subsys.crws_lost = true; 1861 return; 1862 } 1863 copy_crw_from_guest(&crw_cont->crw, crw); 1864 1865 QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling); 1866 } 1867 1868 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid, 1869 int rfmt, void *buf) 1870 { 1871 int i, desc_size; 1872 uint32_t words[8]; 1873 uint32_t chpid_type_word; 1874 CssImage *css; 1875 1876 if (!m && !cssid) { 1877 css = channel_subsys.css[channel_subsys.default_cssid]; 1878 } else { 1879 css = channel_subsys.css[cssid]; 1880 } 1881 if (!css) { 1882 return 0; 1883 } 1884 desc_size = 0; 1885 for (i = f_chpid; i <= l_chpid; i++) { 1886 if (css->chpids[i].in_use) { 1887 chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i; 1888 if (rfmt == 0) { 1889 words[0] = cpu_to_be32(chpid_type_word); 1890 words[1] = 0; 1891 memcpy(buf + desc_size, words, 8); 1892 desc_size += 8; 1893 } else if (rfmt == 1) { 1894 words[0] = cpu_to_be32(chpid_type_word); 1895 words[1] = 0; 1896 words[2] = 0; 1897 words[3] = 0; 1898 words[4] = 0; 1899 words[5] = 0; 1900 words[6] = 0; 1901 words[7] = 0; 1902 memcpy(buf + desc_size, words, 32); 1903 desc_size += 32; 1904 } 1905 } 1906 } 1907 return desc_size; 1908 } 1909 1910 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo) 1911 { 1912 /* dct is currently ignored (not really meaningful for our devices) */ 1913 /* TODO: Don't ignore mbk. */ 1914 if (update && !channel_subsys.chnmon_active) { 1915 /* Enable measuring. */ 1916 channel_subsys.chnmon_area = mbo; 1917 channel_subsys.chnmon_active = true; 1918 } 1919 if (!update && channel_subsys.chnmon_active) { 1920 /* Disable measuring. */ 1921 channel_subsys.chnmon_area = 0; 1922 channel_subsys.chnmon_active = false; 1923 } 1924 } 1925 1926 IOInstEnding css_do_rsch(SubchDev *sch) 1927 { 1928 SCHIB *schib = &sch->curr_status; 1929 1930 if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) { 1931 return IOINST_CC_NOT_OPERATIONAL; 1932 } 1933 1934 if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) { 1935 return IOINST_CC_STATUS_PRESENT; 1936 } 1937 1938 if (((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || 1939 (schib->scsw.ctrl & SCSW_ACTL_RESUME_PEND) || 1940 (!(schib->scsw.ctrl & SCSW_ACTL_SUSP))) { 1941 return IOINST_CC_BUSY; 1942 } 1943 1944 /* If monitoring is active, update counter. */ 1945 if (channel_subsys.chnmon_active) { 1946 css_update_chnmon(sch); 1947 } 1948 1949 schib->scsw.ctrl |= SCSW_ACTL_RESUME_PEND; 1950 return do_subchannel_work(sch); 1951 } 1952 1953 int css_do_rchp(uint8_t cssid, uint8_t chpid) 1954 { 1955 uint8_t real_cssid; 1956 1957 if (cssid > channel_subsys.max_cssid) { 1958 return -EINVAL; 1959 } 1960 if (channel_subsys.max_cssid == 0) { 1961 real_cssid = channel_subsys.default_cssid; 1962 } else { 1963 real_cssid = cssid; 1964 } 1965 if (!channel_subsys.css[real_cssid]) { 1966 return -EINVAL; 1967 } 1968 1969 if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) { 1970 return -ENODEV; 1971 } 1972 1973 if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) { 1974 fprintf(stderr, 1975 "rchp unsupported for non-virtual chpid %x.%02x!\n", 1976 real_cssid, chpid); 1977 return -ENODEV; 1978 } 1979 1980 /* We don't really use a channel path, so we're done here. */ 1981 css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 1982 channel_subsys.max_cssid > 0 ? 1 : 0, chpid); 1983 if (channel_subsys.max_cssid > 0) { 1984 css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 0, real_cssid << 8); 1985 } 1986 return 0; 1987 } 1988 1989 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid) 1990 { 1991 SubchSet *set; 1992 uint8_t real_cssid; 1993 1994 real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; 1995 if (ssid > MAX_SSID || 1996 !channel_subsys.css[real_cssid] || 1997 !channel_subsys.css[real_cssid]->sch_set[ssid]) { 1998 return true; 1999 } 2000 set = channel_subsys.css[real_cssid]->sch_set[ssid]; 2001 return schid > find_last_bit(set->schids_used, 2002 (MAX_SCHID + 1) / sizeof(unsigned long)); 2003 } 2004 2005 unsigned int css_find_free_chpid(uint8_t cssid) 2006 { 2007 CssImage *css = channel_subsys.css[cssid]; 2008 unsigned int chpid; 2009 2010 if (!css) { 2011 return MAX_CHPID + 1; 2012 } 2013 2014 for (chpid = 0; chpid <= MAX_CHPID; chpid++) { 2015 /* skip reserved chpid */ 2016 if (chpid == VIRTIO_CCW_CHPID) { 2017 continue; 2018 } 2019 if (!css->chpids[chpid].in_use) { 2020 return chpid; 2021 } 2022 } 2023 return MAX_CHPID + 1; 2024 } 2025 2026 static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type, 2027 bool is_virt) 2028 { 2029 CssImage *css; 2030 2031 trace_css_chpid_add(cssid, chpid, type); 2032 css = channel_subsys.css[cssid]; 2033 if (!css) { 2034 return -EINVAL; 2035 } 2036 if (css->chpids[chpid].in_use) { 2037 return -EEXIST; 2038 } 2039 css->chpids[chpid].in_use = 1; 2040 css->chpids[chpid].type = type; 2041 css->chpids[chpid].is_virtual = is_virt; 2042 2043 css_generate_chp_crws(cssid, chpid); 2044 2045 return 0; 2046 } 2047 2048 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type) 2049 { 2050 SCHIB *schib = &sch->curr_status; 2051 int i; 2052 CssImage *css = channel_subsys.css[sch->cssid]; 2053 2054 assert(css != NULL); 2055 memset(&schib->pmcw, 0, sizeof(PMCW)); 2056 schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; 2057 schib->pmcw.devno = sch->devno; 2058 /* single path */ 2059 schib->pmcw.pim = 0x80; 2060 schib->pmcw.pom = 0xff; 2061 schib->pmcw.pam = 0x80; 2062 schib->pmcw.chpid[0] = chpid; 2063 if (!css->chpids[chpid].in_use) { 2064 css_add_chpid(sch->cssid, chpid, type, true); 2065 } 2066 2067 memset(&schib->scsw, 0, sizeof(SCSW)); 2068 schib->mba = 0; 2069 for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { 2070 schib->mda[i] = 0; 2071 } 2072 } 2073 2074 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid) 2075 { 2076 uint8_t real_cssid; 2077 2078 real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; 2079 2080 if (!channel_subsys.css[real_cssid]) { 2081 return NULL; 2082 } 2083 2084 if (!channel_subsys.css[real_cssid]->sch_set[ssid]) { 2085 return NULL; 2086 } 2087 2088 return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid]; 2089 } 2090 2091 /** 2092 * Return free device number in subchannel set. 2093 * 2094 * Return index of the first free device number in the subchannel set 2095 * identified by @p cssid and @p ssid, beginning the search at @p 2096 * start and wrapping around at MAX_DEVNO. Return a value exceeding 2097 * MAX_SCHID if there are no free device numbers in the subchannel 2098 * set. 2099 */ 2100 static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid, 2101 uint16_t start) 2102 { 2103 uint32_t round; 2104 2105 for (round = 0; round <= MAX_DEVNO; round++) { 2106 uint16_t devno = (start + round) % MAX_DEVNO; 2107 2108 if (!css_devno_used(cssid, ssid, devno)) { 2109 return devno; 2110 } 2111 } 2112 return MAX_DEVNO + 1; 2113 } 2114 2115 /** 2116 * Return first free subchannel (id) in subchannel set. 2117 * 2118 * Return index of the first free subchannel in the subchannel set 2119 * identified by @p cssid and @p ssid, if there is any. Return a value 2120 * exceeding MAX_SCHID if there are no free subchannels in the 2121 * subchannel set. 2122 */ 2123 static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid) 2124 { 2125 uint32_t schid; 2126 2127 for (schid = 0; schid <= MAX_SCHID; schid++) { 2128 if (!css_find_subch(1, cssid, ssid, schid)) { 2129 return schid; 2130 } 2131 } 2132 return MAX_SCHID + 1; 2133 } 2134 2135 /** 2136 * Return first free subchannel (id) in subchannel set for a device number 2137 * 2138 * Verify the device number @p devno is not used yet in the subchannel 2139 * set identified by @p cssid and @p ssid. Set @p schid to the index 2140 * of the first free subchannel in the subchannel set, if there is 2141 * any. Return true if everything succeeded and false otherwise. 2142 */ 2143 static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid, 2144 uint16_t devno, uint16_t *schid, 2145 Error **errp) 2146 { 2147 uint32_t free_schid; 2148 2149 assert(schid); 2150 if (css_devno_used(cssid, ssid, devno)) { 2151 error_setg(errp, "Device %x.%x.%04x already exists", 2152 cssid, ssid, devno); 2153 return false; 2154 } 2155 free_schid = css_find_free_subch(cssid, ssid); 2156 if (free_schid > MAX_SCHID) { 2157 error_setg(errp, "No free subchannel found for %x.%x.%04x", 2158 cssid, ssid, devno); 2159 return false; 2160 } 2161 *schid = free_schid; 2162 return true; 2163 } 2164 2165 /** 2166 * Return first free subchannel (id) and device number 2167 * 2168 * Locate the first free subchannel and first free device number in 2169 * any of the subchannel sets of the channel subsystem identified by 2170 * @p cssid. Return false if no free subchannel / device number could 2171 * be found. Otherwise set @p ssid, @p devno and @p schid to identify 2172 * the available subchannel and device number and return true. 2173 * 2174 * May modify @p ssid, @p devno and / or @p schid even if no free 2175 * subchannel / device number could be found. 2176 */ 2177 static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid, 2178 uint16_t *devno, uint16_t *schid, 2179 Error **errp) 2180 { 2181 uint32_t free_schid, free_devno; 2182 2183 assert(ssid && devno && schid); 2184 for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) { 2185 free_schid = css_find_free_subch(cssid, *ssid); 2186 if (free_schid > MAX_SCHID) { 2187 continue; 2188 } 2189 free_devno = css_find_free_devno(cssid, *ssid, free_schid); 2190 if (free_devno > MAX_DEVNO) { 2191 continue; 2192 } 2193 *schid = free_schid; 2194 *devno = free_devno; 2195 return true; 2196 } 2197 error_setg(errp, "Virtual channel subsystem is full!"); 2198 return false; 2199 } 2200 2201 bool css_subch_visible(SubchDev *sch) 2202 { 2203 if (sch->ssid > channel_subsys.max_ssid) { 2204 return false; 2205 } 2206 2207 if (sch->cssid != channel_subsys.default_cssid) { 2208 return (channel_subsys.max_cssid > 0); 2209 } 2210 2211 return true; 2212 } 2213 2214 bool css_present(uint8_t cssid) 2215 { 2216 return (channel_subsys.css[cssid] != NULL); 2217 } 2218 2219 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno) 2220 { 2221 if (!channel_subsys.css[cssid]) { 2222 return false; 2223 } 2224 if (!channel_subsys.css[cssid]->sch_set[ssid]) { 2225 return false; 2226 } 2227 2228 return !!test_bit(devno, 2229 channel_subsys.css[cssid]->sch_set[ssid]->devnos_used); 2230 } 2231 2232 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid, 2233 uint16_t devno, SubchDev *sch) 2234 { 2235 CssImage *css; 2236 SubchSet *s_set; 2237 2238 trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid, 2239 devno); 2240 if (!channel_subsys.css[cssid]) { 2241 fprintf(stderr, 2242 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n", 2243 __func__, cssid, ssid, schid); 2244 return; 2245 } 2246 css = channel_subsys.css[cssid]; 2247 2248 if (!css->sch_set[ssid]) { 2249 css->sch_set[ssid] = g_new0(SubchSet, 1); 2250 } 2251 s_set = css->sch_set[ssid]; 2252 2253 s_set->sch[schid] = sch; 2254 if (sch) { 2255 set_bit(schid, s_set->schids_used); 2256 set_bit(devno, s_set->devnos_used); 2257 } else { 2258 clear_bit(schid, s_set->schids_used); 2259 clear_bit(devno, s_set->devnos_used); 2260 } 2261 } 2262 2263 void css_crw_add_to_queue(CRW crw) 2264 { 2265 CrwContainer *crw_cont; 2266 2267 trace_css_crw((crw.flags & CRW_FLAGS_MASK_RSC) >> 8, 2268 crw.flags & CRW_FLAGS_MASK_ERC, 2269 crw.rsid, 2270 (crw.flags & CRW_FLAGS_MASK_C) ? "(chained)" : ""); 2271 2272 /* TODO: Maybe use a static crw pool? */ 2273 crw_cont = g_try_new0(CrwContainer, 1); 2274 if (!crw_cont) { 2275 channel_subsys.crws_lost = true; 2276 return; 2277 } 2278 2279 crw_cont->crw = crw; 2280 2281 QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling); 2282 2283 if (channel_subsys.do_crw_mchk) { 2284 channel_subsys.do_crw_mchk = false; 2285 /* Inject crw pending machine check. */ 2286 s390_crw_mchk(); 2287 } 2288 } 2289 2290 void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited, 2291 int chain, uint16_t rsid) 2292 { 2293 CRW crw; 2294 2295 crw.flags = (rsc << 8) | erc; 2296 if (solicited) { 2297 crw.flags |= CRW_FLAGS_MASK_S; 2298 } 2299 if (chain) { 2300 crw.flags |= CRW_FLAGS_MASK_C; 2301 } 2302 crw.rsid = rsid; 2303 if (channel_subsys.crws_lost) { 2304 crw.flags |= CRW_FLAGS_MASK_R; 2305 channel_subsys.crws_lost = false; 2306 } 2307 2308 css_crw_add_to_queue(crw); 2309 } 2310 2311 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, 2312 int hotplugged, int add) 2313 { 2314 uint8_t guest_cssid; 2315 bool chain_crw; 2316 2317 if (add && !hotplugged) { 2318 return; 2319 } 2320 if (channel_subsys.max_cssid == 0) { 2321 /* Default cssid shows up as 0. */ 2322 guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid; 2323 } else { 2324 /* Show real cssid to the guest. */ 2325 guest_cssid = cssid; 2326 } 2327 /* 2328 * Only notify for higher subchannel sets/channel subsystems if the 2329 * guest has enabled it. 2330 */ 2331 if ((ssid > channel_subsys.max_ssid) || 2332 (guest_cssid > channel_subsys.max_cssid) || 2333 ((channel_subsys.max_cssid == 0) && 2334 (cssid != channel_subsys.default_cssid))) { 2335 return; 2336 } 2337 chain_crw = (channel_subsys.max_ssid > 0) || 2338 (channel_subsys.max_cssid > 0); 2339 css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, chain_crw ? 1 : 0, schid); 2340 if (chain_crw) { 2341 css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, 0, 2342 (guest_cssid << 8) | (ssid << 4)); 2343 } 2344 /* RW_ERC_IPI --> clear pending interrupts */ 2345 css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid); 2346 } 2347 2348 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid) 2349 { 2350 /* TODO */ 2351 } 2352 2353 void css_generate_css_crws(uint8_t cssid) 2354 { 2355 if (!channel_subsys.sei_pending) { 2356 css_queue_crw(CRW_RSC_CSS, CRW_ERC_EVENT, 0, 0, cssid); 2357 } 2358 channel_subsys.sei_pending = true; 2359 } 2360 2361 void css_clear_sei_pending(void) 2362 { 2363 channel_subsys.sei_pending = false; 2364 } 2365 2366 int css_enable_mcsse(void) 2367 { 2368 trace_css_enable_facility("mcsse"); 2369 channel_subsys.max_cssid = MAX_CSSID; 2370 return 0; 2371 } 2372 2373 int css_enable_mss(void) 2374 { 2375 trace_css_enable_facility("mss"); 2376 channel_subsys.max_ssid = MAX_SSID; 2377 return 0; 2378 } 2379 2380 void css_reset_sch(SubchDev *sch) 2381 { 2382 SCHIB *schib = &sch->curr_status; 2383 2384 if ((schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) { 2385 sch->disable_cb(sch); 2386 } 2387 2388 schib->pmcw.intparm = 0; 2389 schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 2390 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 2391 PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF); 2392 schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; 2393 schib->pmcw.devno = sch->devno; 2394 schib->pmcw.pim = 0x80; 2395 schib->pmcw.lpm = schib->pmcw.pim; 2396 schib->pmcw.pnom = 0; 2397 schib->pmcw.lpum = 0; 2398 schib->pmcw.mbi = 0; 2399 schib->pmcw.pom = 0xff; 2400 schib->pmcw.pam = 0x80; 2401 schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME | 2402 PMCW_CHARS_MASK_CSENSE); 2403 2404 memset(&schib->scsw, 0, sizeof(schib->scsw)); 2405 schib->mba = 0; 2406 2407 sch->channel_prog = 0x0; 2408 sch->last_cmd_valid = false; 2409 sch->thinint_active = false; 2410 } 2411 2412 void css_reset(void) 2413 { 2414 CrwContainer *crw_cont; 2415 2416 /* Clean up monitoring. */ 2417 channel_subsys.chnmon_active = false; 2418 channel_subsys.chnmon_area = 0; 2419 2420 /* Clear pending CRWs. */ 2421 while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) { 2422 QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling); 2423 g_free(crw_cont); 2424 } 2425 channel_subsys.sei_pending = false; 2426 channel_subsys.do_crw_mchk = true; 2427 channel_subsys.crws_lost = false; 2428 2429 /* Reset maximum ids. */ 2430 channel_subsys.max_cssid = 0; 2431 channel_subsys.max_ssid = 0; 2432 } 2433 2434 static void get_css_devid(Object *obj, Visitor *v, const char *name, 2435 void *opaque, Error **errp) 2436 { 2437 const Property *prop = opaque; 2438 CssDevId *dev_id = object_field_prop_ptr(obj, prop); 2439 char buffer[] = "xx.x.xxxx"; 2440 char *p = buffer; 2441 int r; 2442 2443 if (dev_id->valid) { 2444 2445 r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid, 2446 dev_id->ssid, dev_id->devid); 2447 assert(r == sizeof(buffer) - 1); 2448 2449 /* drop leading zero */ 2450 if (dev_id->cssid <= 0xf) { 2451 p++; 2452 } 2453 } else { 2454 snprintf(buffer, sizeof(buffer), "<unset>"); 2455 } 2456 2457 visit_type_str(v, name, &p, errp); 2458 } 2459 2460 /* 2461 * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid 2462 */ 2463 static void set_css_devid(Object *obj, Visitor *v, const char *name, 2464 void *opaque, Error **errp) 2465 { 2466 const Property *prop = opaque; 2467 CssDevId *dev_id = object_field_prop_ptr(obj, prop); 2468 char *str; 2469 int num, n1, n2; 2470 unsigned int cssid, ssid, devid; 2471 2472 if (!visit_type_str(v, name, &str, errp)) { 2473 return; 2474 } 2475 2476 num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2); 2477 if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) { 2478 error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str); 2479 goto out; 2480 } 2481 if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) { 2482 error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x", 2483 cssid, ssid); 2484 goto out; 2485 } 2486 2487 dev_id->cssid = cssid; 2488 dev_id->ssid = ssid; 2489 dev_id->devid = devid; 2490 dev_id->valid = true; 2491 2492 out: 2493 g_free(str); 2494 } 2495 2496 const PropertyInfo css_devid_propinfo = { 2497 .type = "str", 2498 .description = "Identifier of an I/O device in the channel " 2499 "subsystem, example: fe.1.23ab", 2500 .get = get_css_devid, 2501 .set = set_css_devid, 2502 }; 2503 2504 const PropertyInfo css_devid_ro_propinfo = { 2505 .type = "str", 2506 .description = "Read-only identifier of an I/O device in the channel " 2507 "subsystem, example: fe.1.23ab", 2508 .get = get_css_devid, 2509 }; 2510 2511 SubchDev *css_create_sch(CssDevId bus_id, Error **errp) 2512 { 2513 uint16_t schid = 0; 2514 SubchDev *sch; 2515 2516 if (bus_id.valid) { 2517 if (!channel_subsys.css[bus_id.cssid]) { 2518 css_create_css_image(bus_id.cssid, false); 2519 } 2520 2521 if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid, 2522 bus_id.devid, &schid, errp)) { 2523 return NULL; 2524 } 2525 } else { 2526 for (bus_id.cssid = channel_subsys.default_cssid;;) { 2527 if (!channel_subsys.css[bus_id.cssid]) { 2528 css_create_css_image(bus_id.cssid, false); 2529 } 2530 2531 if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid, 2532 &bus_id.devid, &schid, 2533 NULL)) { 2534 break; 2535 } 2536 bus_id.cssid = (bus_id.cssid + 1) % MAX_CSSID; 2537 if (bus_id.cssid == channel_subsys.default_cssid) { 2538 error_setg(errp, "Virtual channel subsystem is full!"); 2539 return NULL; 2540 } 2541 } 2542 } 2543 2544 sch = g_new0(SubchDev, 1); 2545 sch->cssid = bus_id.cssid; 2546 sch->ssid = bus_id.ssid; 2547 sch->devno = bus_id.devid; 2548 sch->schid = schid; 2549 css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch); 2550 return sch; 2551 } 2552 2553 static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id) 2554 { 2555 char *fid_path; 2556 FILE *fd; 2557 uint32_t chpid[8]; 2558 int i; 2559 SCHIB *schib = &sch->curr_status; 2560 2561 fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids", 2562 dev_id->cssid, dev_id->ssid, dev_id->devid); 2563 fd = fopen(fid_path, "r"); 2564 if (fd == NULL) { 2565 error_report("%s: open %s failed", __func__, fid_path); 2566 g_free(fid_path); 2567 return -EINVAL; 2568 } 2569 2570 if (fscanf(fd, "%x %x %x %x %x %x %x %x", 2571 &chpid[0], &chpid[1], &chpid[2], &chpid[3], 2572 &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) { 2573 fclose(fd); 2574 g_free(fid_path); 2575 return -EINVAL; 2576 } 2577 2578 for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { 2579 schib->pmcw.chpid[i] = chpid[i]; 2580 } 2581 2582 fclose(fd); 2583 g_free(fid_path); 2584 2585 return 0; 2586 } 2587 2588 static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id) 2589 { 2590 char *fid_path; 2591 FILE *fd; 2592 uint32_t pim, pam, pom; 2593 SCHIB *schib = &sch->curr_status; 2594 2595 fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom", 2596 dev_id->cssid, dev_id->ssid, dev_id->devid); 2597 fd = fopen(fid_path, "r"); 2598 if (fd == NULL) { 2599 error_report("%s: open %s failed", __func__, fid_path); 2600 g_free(fid_path); 2601 return -EINVAL; 2602 } 2603 2604 if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) { 2605 fclose(fd); 2606 g_free(fid_path); 2607 return -EINVAL; 2608 } 2609 2610 schib->pmcw.pim = pim; 2611 schib->pmcw.pam = pam; 2612 schib->pmcw.pom = pom; 2613 fclose(fd); 2614 g_free(fid_path); 2615 2616 return 0; 2617 } 2618 2619 static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type, 2620 CssDevId *dev_id) 2621 { 2622 char *fid_path; 2623 FILE *fd; 2624 2625 fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type", 2626 dev_id->cssid, chpid); 2627 fd = fopen(fid_path, "r"); 2628 if (fd == NULL) { 2629 error_report("%s: open %s failed", __func__, fid_path); 2630 g_free(fid_path); 2631 return -EINVAL; 2632 } 2633 2634 if (fscanf(fd, "%x", type) != 1) { 2635 fclose(fd); 2636 g_free(fid_path); 2637 return -EINVAL; 2638 } 2639 2640 fclose(fd); 2641 g_free(fid_path); 2642 2643 return 0; 2644 } 2645 2646 /* 2647 * We currently retrieve the real device information from sysfs to build the 2648 * guest subchannel information block without considering the migration feature. 2649 * We need to revisit this problem when we want to add migration support. 2650 */ 2651 int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id) 2652 { 2653 CssImage *css = channel_subsys.css[sch->cssid]; 2654 SCHIB *schib = &sch->curr_status; 2655 uint32_t type; 2656 int i, ret; 2657 2658 assert(css != NULL); 2659 memset(&schib->pmcw, 0, sizeof(PMCW)); 2660 schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV; 2661 /* We are dealing with I/O subchannels only. */ 2662 schib->pmcw.devno = sch->devno; 2663 2664 /* Grab path mask from sysfs. */ 2665 ret = css_sch_get_path_masks(sch, dev_id); 2666 if (ret) { 2667 return ret; 2668 } 2669 2670 /* Grab chpids from sysfs. */ 2671 ret = css_sch_get_chpids(sch, dev_id); 2672 if (ret) { 2673 return ret; 2674 } 2675 2676 /* Build chpid type. */ 2677 for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) { 2678 if (schib->pmcw.chpid[i] && !css->chpids[schib->pmcw.chpid[i]].in_use) { 2679 ret = css_sch_get_chpid_type(schib->pmcw.chpid[i], &type, dev_id); 2680 if (ret) { 2681 return ret; 2682 } 2683 css_add_chpid(sch->cssid, schib->pmcw.chpid[i], type, false); 2684 } 2685 } 2686 2687 memset(&schib->scsw, 0, sizeof(SCSW)); 2688 schib->mba = 0; 2689 for (i = 0; i < ARRAY_SIZE(schib->mda); i++) { 2690 schib->mda[i] = 0; 2691 } 2692 2693 return 0; 2694 } 2695