/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm_int.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "sysemu/runstate.h"
#include "sysemu/device_tree.h"
#include "exec/gdbstub.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
#include "hw/s390x/pv.h"

#ifndef DEBUG_KVM
#define DEBUG_KVM 0
#endif

#define DPRINTF(fmt, ...) do {                \
    if (DEBUG_KVM) {                          \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
    }                                         \
} while (0)
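
/*
 * Convenience wrapper: probe a single attribute of the KVM_S390_VM_MEM_CTRL
 * group (memory limit, CMMA enable/clear) before trying to use it.
 */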
#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)
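
/* Single slot used to report a hardware watchpoint hit back to the core debug code */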
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;
static int cap_gs;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;

static int active_cmma;

static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared);

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
{
    if (pagesize == 4 * KiB) {
        return;
    }

    if (!hpage_1m_allowed()) {
        error_setg(errp, "This QEMU machine does not support huge page "
                   "mappings");
        return;
    }

    if (pagesize != 1 * MiB) {
        error_setg(errp, "Memory backing with 2G pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_setg(errp, "Memory backing with 1M pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    cap_hpage_1m = 1;
}

int kvm_s390_get_hpage_1m(void)
{
    return cap_hpage_1m;
}

static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}
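
/*
 * Architecture-specific KVM initialization, run once during accelerator
 * setup: check the capabilities this code relies on and enable the VM-wide
 * capabilities QEMU always wants.
 */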
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_DEVICE_CTRL)) {
        error_report("KVM is missing capability KVM_CAP_DEVICE_CTRL - "
                     "please use kernel 3.15 or newer");
        return -1;
    }

    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (cpu_model_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) {
            cap_gs = 1;
        }
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (cpu_model_allowed() && kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}
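
/*
 * Check whether KVM exposes all register categories in @regs through the
 * sync_regs area of kvm_run, so they can be exchanged there instead of via
 * separate ioctls.
 */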
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
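
/*
 * Guest TOD clock accessors: the plain variants transfer the low 64 bits and
 * the epoch index through two separate KVM_S390_VM_TOD attributes, while the
 * _ext variants use the combined KVM_S390_VM_TOD_EXT attribute.
 */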
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr:      the logical start address in guest memory
 * @ar:        the access register number
 * @hostbuf:   buffer in host memory. NULL = do only checks w/o copying
 * @len:       length that should be transferred
 * @is_write:  true = write, false = read
 * Returns:    0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}

int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared)
{
    static void *mem;

    if (mem) {
        /* we only support one allocation, which is enough for initial ram */
        return NULL;
    }

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (mem == MAP_FAILED) {
        mem = NULL;
    }
    if (mem && align) {
        *align = QEMU_VMALLOC_ALIGN;
    }
    return mem;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}
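
/*
 * Look up a hardware breakpoint/watchpoint by address and type; a length of
 * -1 matches any length (used when resolving debug exits, which do not
 * report a length).
 */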
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}
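
/*
 * Describe the currently configured hardware breakpoints to the kernel,
 * filling in the physical address of each entry before the array is handed
 * over via the guest-debug ioctl.
 */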
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr =
                s390_cpu_get_phys_addr_debug(cpu, hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
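
/*
 * Translate the rich kvm_s390_irq format into the older kvm_s390_interrupt
 * format, used for kernels without KVM_CAP_S390_INJECT_IRQ and for floating
 * interrupts injected through KVM_S390_INTERRUPT.
 */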
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
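
/*
 * Handle an intercepted SERVICE CALL. For protected guests the SCCB is not
 * directly accessible, so the call is forwarded to the protected-virtualization
 * aware SCLP handler and the Ultravisor takes care of the condition code.
 */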
static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(env, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(env, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
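
/*
 * Decode the base/index/displacement of the intercepted instruction from the
 * IPA/IPB bytes KVM provides (RXY and RSY formats, with a 20-bit signed
 * displacement) and report the access register number used for the base.
 */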
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}
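
/*
 * Handlers for the B9, EB and E3 privileged opcode groups. Each returns -1
 * for operations that are not provided, which makes the caller inject an
 * operation exception into the guest.
 */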
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}
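
/*
 * Dispatch an instruction intercept on the major opcode (IPA0). Anything a
 * handler rejects with a negative return value is reported to the guest as
 * an operation exception.
 */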
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state. In that case go into crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}
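
/* Dispatch a KVM_EXIT_S390_SIEIC exit according to the SIE intercept code. */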
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
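
/*
 * Patch the SYSIB 3.2.2 block returned by STSI: insert our CPU counts, the
 * machine name in EBCDIC, the Extended Name and the UUID for this
 * virtualization level.
 */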
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that does not provide Extended Names
     * delimits the stack; it is assumed to be incapable of managing
     * Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on the QEMU invocation, we go with
     * the same "KVMguest" default that KVM has filled into the short name
     * field.
     */
    if (qemu_name) {
        strncpy((char *)sysib.ext_names[0], qemu_name,
                sizeof(sysib.ext_names[0]));
    } else {
        strcpy((char *)sysib.ext_names[0], "KVMguest");
    }
    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}

static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}
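
/*
 * Top-level handler for s390-specific KVM exits. A return value of 0 is
 * converted to EXCP_INTERRUPT before returning to the generic KVM loop.
 */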
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_get_gs(void)
{
    return cap_gs;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case S390_CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case S390_CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case S390_CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case S390_CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}
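
/*
 * Save/restore the per-vCPU pending interrupt state (KVM_S390_GET_IRQ_STATE /
 * KVM_S390_SET_IRQ_STATE) into cpu->irqstate around migration; the buffer was
 * allocated in kvm_arch_init_vcpu() and sized with VCPU_IRQ_BUF_SIZE().
 */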
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = VCPU_IRQ_BUF_SIZE(max_cpus),
    };
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = cpu->irqstate_saved_size,
    };
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}
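
/*
 * Build an s390 adapter-interruption route for a zPCI MSI-X vector: the
 * summary/indicator addresses come from the device's registered adapter
 * routes, with the vector number added to the indicator bit offset.
 */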
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        DPRINTF("add_msi_route no pci device\n");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        DPRINTF("add_msi_route no zpci device\n");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * We're going to add all subfunctions now, if the corresponding feature
     * is available that unlocks the query functions.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return 0;
}

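/*
 * Counterpart of query_cpu_subfunc(): fill the subfunction blocks from the
 * selected features and hand them to KVM as the processor subfunc attribute.
 */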
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

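/*
 * Translate the selected CPU features into KVM's bit layout (via
 * kvm_to_feat) and set them as the processor feature attribute.
 */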
static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}

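/*
 * Build the host CPU model: query the machine properties, features and
 * subfunctions from KVM, apply the fixups below, and look up the matching
 * CPU definition.
 */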
void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* PTFF subfunctions might be indicated although kernel support missing */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                          KVM_S390_VM_CRYPTO_ENABLE_APIE)) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}

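/*
 * Enable or disable interpretive execution of AP instructions (APIE), if
 * the kernel supports toggling it.
 */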
static void kvm_s390_configure_apie(bool interpret)
{
    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
                                KVM_S390_VM_CRYPTO_DISABLE_APIE;

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(e) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (test_bit(S390_FEAT_AP, model->features)) {
        kvm_s390_configure_apie(true);
    }
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}