/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "system/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "system/system.h"
#include "system/hw_accel.h"
#include "system/runstate.h"
#include "system/device_tree.h"
#include "gdbstub/enums.h"
#include "system/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-hypercall.h"
#include "target/s390x/kvm/pv.h"
#include CONFIG_DEVICES

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PTF                     0xa2
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
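/*
 * Illustrative arithmetic (not from the original source): with the
 * default NR_LOCAL_IRQS of 32, a machine configured with max_cpus = 248
 * reserves room for 248 + 32 = 280 struct kvm_s390_irq entries per vCPU.
 */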
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;
static int cap_protected_dump;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_crypto_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}
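/*
 * Map the "aes-key-wrap" and "dea-key-wrap" machine properties to the
 * matching KVM crypto attributes. If the host kernel does not advertise
 * an attribute, it is silently left at the kernel default.
 */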
static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
{
    if (pagesize == 4 * KiB) {
        return;
    }

    if (pagesize != 1 * MiB) {
        error_setg(errp, "Memory backing with 2G pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_setg(errp, "Memory backing with 1M pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    cap_hpage_1m = 1;
}

int kvm_s390_get_hpage_1m(void)
{
    return cap_hpage_1m;
}
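/* Default all CCW machine classes to the "host" CPU model under KVM. */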
static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int required_caps[] = {
        KVM_CAP_DEVICE_CTRL,
        KVM_CAP_SYNC_REGS,
    };

    for (int i = 0; i < ARRAY_SIZE(required_caps); i++) {
        if (!kvm_check_extension(s, required_caps[i])) {
            error_report("KVM is missing capability #%d - "
                         "please use kernel 3.15 or newer", required_caps[i]);
            return -1;
        }
    }

    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
        error_report("KVM is missing capability KVM_CAP_S390_COW - "
                     "unsupported environment");
        return -1;
    }

    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
    mem_op_storage_key_support = cap_mem_op_extension > 0;
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
    cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);
    cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP);

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
    if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
        cap_ri = 1;
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);

    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}
static int can_sync_regs(CPUState *cs, int regs)
{
    return (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

#define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
                                KVM_SYNC_CRS | KVM_SYNC_PREFIX)

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    CPUS390XState *env = cpu_env(cs);
    struct kvm_fpu fpu = {};
    int r;
    int i;

    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    memcpy(cs->kvm_run->s.regs.gprs, env->regs, sizeof(cs->kvm_run->s.regs.gprs));
    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /*
     * Access registers, control registers and the prefix - these are
     * always available via kvm_sync_regs in the kernels that we support
     */
    memcpy(cs->kvm_run->s.regs.acrs, env->aregs, sizeof(cs->kvm_run->s.regs.acrs));
    memcpy(cs->kvm_run->s.regs.crs, env->cregs, sizeof(cs->kvm_run->s.regs.crs));
    cs->kvm_run->s.regs.prefix = env->psa;
    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_PREFIX;

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    return 0;
}
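/*
 * The fetch direction below mirrors kvm_arch_put_registers(): prefer the
 * shared sync_regs area and fall back to ONE_REG or the KVM_GET_FPU ioctl
 * for register classes not covered by kvm_valid_regs.
 */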
int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    CPUS390XState *env = cpu_env(cs);
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS, ACRS and CRS */
    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
    memcpy(env->regs, cs->kvm_run->s.regs.gprs, sizeof(env->regs));
    memcpy(env->aregs, cs->kvm_run->s.regs.acrs, sizeof(env->aregs));
    memcpy(env->cregs, cs->kvm_run->s.regs.crs, sizeof(env->cregs));

    /* The prefix */
    env->psa = cs->kvm_run->s.regs.prefix;

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}
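/*
 * Illustrative usage sketch (not part of the original file): migration
 * saves and restores the extended TOD clock as a matched pair, with
 * tod_high carrying the epoch index:
 *
 *     uint8_t tod_high;
 *     uint64_t tod_low;
 *
 *     if (!kvm_s390_get_clock_ext(&tod_high, &tod_low)) {
 *         ...transfer both values...
 *         kvm_s390_set_clock_ext(tod_high, tod_low);
 *     }
 */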
int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}
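/*
 * Illustrative call (not from the original source): reading 4 bytes from
 * guest logical address 0x1000 through access register 0 -
 *
 *     uint32_t val;
 *     int ret = kvm_s390_mem_op(cpu, 0x1000, 0, &val, sizeof(val), false);
 *
 * Passing hostbuf == NULL instead only checks whether the access would
 * succeed, without copying any data.
 */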
int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        trace_kvm_sw_breakpoint(4);
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        trace_kvm_sw_breakpoint(2);
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}
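/*
 * Look up a hardware breakpoint by address and type. A len of -1 acts as
 * a wildcard; the debug-exit handler relies on this because KVM reports
 * only the address of the breakpoint or watchpoint that fired.
 */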
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints = g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr =
                s390_cpu_get_phys_addr_debug(cpu, hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN ... KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
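/*
 * Handle an intercepted SERVICE CALL. ipbh0 (the upper halfword of the
 * IPB) encodes the r1/r2 register numbers: the command word is read from
 * r1 and the SCCB address from r2. For protected guests, the Ultravisor
 * is responsible for setting the condition code.
 */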
static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(cpu, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(cpu, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
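/*
 * Worked example for the 20-bit displacement logic above (illustrative):
 * with DL = 0x000 and DH = 0x80, disp2 starts out as 0x80000. Bit 19 is
 * set, so adding 0xfff00000 yields 0xfff80000, which (long)(int) then
 * sign-extends to the displacement -524288. get_base_disp_rsy() below
 * applies the same sign extension.
 */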
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(cpu, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static void kvm_handle_ptf(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb >> 20) & 0x0f;

    s390_handle_ptf(cpu, r1, RA_IGNORED);
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_PTF:
        kvm_handle_ptf(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return r;
}
static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}
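/*
 * DIAG 0x288 (timer event): r1/r3 are taken from the IPA field; failures
 * from handle_diag_288() are reported to the guest as a specification
 * exception. On s390 this diagnose is used to control the watchdog.
 */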
static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}

static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
#ifdef CONFIG_S390_CCW_VIRTIO
    case DIAG_KVM_HYPERCALL:
        handle_diag_500(cpu, RA_IGNORED);
        break;
#endif /* CONFIG_S390_CCW_VIRTIO */
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        trace_kvm_insn_diag(func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
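/*
 * For SIGP, the order code is taken from the low bits (SIGP_ORDER_MASK)
 * of the second-operand address, while r1 and r3 come from the two
 * nibbles of ipa1; the result of handle_sigp() becomes the guest's
 * condition code.
 */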
static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    trace_kvm_insn(run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}

/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions if the pgm new PSW
     * would cause a new operation exception. The heuristic checks
     * whether the pgm new PSW lies within 6 bytes before the faulting
     * PSW address (with the same DAT and AS settings), the new PSW is
     * not a wait PSW, and the fault was not triggered from problem
     * state. In that case, go into crashed state.
     */
    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    trace_kvm_intercept(icpt_code, (long)run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
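/*
 * Build the level-3 (VM) part of a SYSIB 3.2.2 response: shift the stack
 * of Extended Names, fill in the CPU counts, the machine name in EBCDIC,
 * the Extended Name and the UUID for this hypervisor level.
 */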
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that does not provide Extended
     * Names delimits the stack, as it is assumed to be incapable of
     * managing Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on the qemu invocation, we go
     * with the same "KVMguest" default that KVM has filled into the
     * short name field.
     */
    strpadcpy((char *)sysib.ext_names[0],
              sizeof(sysib.ext_names[0]),
              qemu_name ?: "KVMguest", '\0');

    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}

static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    case 15:
        insert_stsi_15_1_x(cpu, run->s390_stsi.sel2, run->s390_stsi.addr,
                           run->s390_stsi.ar, RA_IGNORED);
        return 0;
    default:
        return 0;
    }
}
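/*
 * Translate a KVM debug exit into EXCP_DEBUG by matching the reported
 * address against the hardware breakpoint/watchpoint list (a len of -1
 * acts as a wildcard in find_hw_breakpoint()).
 */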
static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    bql_lock();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    bql_unlock();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };

    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_protected_dump(void)
{
    return cap_protected_dump;
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case S390_CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case S390_CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case S390_CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case S390_CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = VCPU_IRQ_BUF_SIZE(max_cpus),
    };
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = cpu->irqstate_saved_size,
    };
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        trace_kvm_msi_route_fixup("no pci device");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        trace_kvm_msi_route_fixup("no zpci device");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        trace_kvm_msi_route_fixup("no pci device");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        trace_kvm_msi_route_fixup("no zpci device");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions now, but only if the corresponding feature
     * that unlocks the query function is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    if (test_bit(S390_FEAT_CCF_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
    }
    return 0;
}
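
/*
 * Counterpart of query_cpu_subfunc(): fill the subfunction blocks from the
 * configured CPU model and hand them to KVM via the processor (rather than
 * machine) attribute.
 */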
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    if (test_bit(S390_FEAT_CCF_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static bool ap_available(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                             KVM_S390_VM_CRYPTO_ENABLE_APIE);
}

static bool ap_enabled(const S390FeatBitmap features)
{
    return test_bit(S390_FEAT_AP, features);
}

static bool uv_feat_supported(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST);
}

static int query_uv_feat_guest(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_uv_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
        .addr = (uint64_t) &prop,
    };
    int rc;

    /* AP support check is currently the only user of the UV feature test */
    if (!(uv_feat_supported() && ap_available())) {
        return 0;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    if (prop.ap) {
        set_bit(S390_FEAT_UV_FEAT_AP, features);
    }
    if (prop.ap_intr) {
        set_bit(S390_FEAT_UV_FEAT_AP_INTR, features);
    }

    return 0;
}
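
/*
 * Mapping of KVM CPU-model feature bits (left) to QEMU S390Feat bits
 * (right), used in both directions by query_cpu_feat() and
 * configure_cpu_feat() below.
 */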
static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}
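
/*
 * Build the host CPU model: query the machine properties (cpuid, IBC,
 * facility mask), add the features reported via STFL(E), SCLP and the
 * subfunction queries, apply fixups for features that need additional
 * kernel support, and finally match the result against a known CPU
 * definition.
 */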
bool kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return false;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return false;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return false;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return false;
    }

    /* PTFF subfunctions may be indicated even if kernel support is missing */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /*
     * If we have kernel support for CPU Topology, indicate the
     * configuration-topology facility.
     */
    if (kvm_check_extension(kvm_state, KVM_CAP_S390_CPU_TOPOLOGY)) {
        set_bit(S390_FEAT_CONFIGURATION_TOPOLOGY, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return false;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (ap_available()) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
        set_bit(S390_FEAT_DIAG_318, model->features);
    }

    /* Test for Ultravisor features that influence secure guest behavior */
    rc = query_uv_feat_guest(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU UV features: %d", rc);
        return false;
    }

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
    return true;
}
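
/*
 * Counterpart of query_uv_feat_guest(): forward the UV guest feature bits
 * (currently only the AP-related ones) selected in the CPU model to KVM.
 */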
static int configure_uv_feat_guest(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_uv_feat uv_feat = {};
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST,
        .addr = (__u64) &uv_feat,
    };

    /* AP support check is currently the only user of the UV feature test */
    if (!(uv_feat_supported() && ap_enabled(features))) {
        return 0;
    }

    if (test_bit(S390_FEAT_UV_FEAT_AP, features)) {
        uv_feat.ap = 1;
    }
    if (test_bit(S390_FEAT_UV_FEAT_AP_INTR, features)) {
        uv_feat.ap_intr = 1;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
}

static void kvm_s390_configure_apie(bool interpret)
{
    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
                                KVM_S390_VM_CRYPTO_DISABLE_APIE;

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}
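
/*
 * Apply the CPU model to KVM: set cpuid/IBC and the STFL(E) facility list,
 * then configure features, subfunctions, CMM, AP interpretation and UV
 * guest features. Without a model (CPU models disabled), only the CMMA
 * compatibility handling is performed.
 */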
bool kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return true;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return false;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return false;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return false;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return false;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (ap_enabled(model->features)) {
        kvm_s390_configure_apie(true);
    }

    /* configure UV-features for the guest indicated via query / test_bit */
    rc = configure_uv_feat_guest(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU UV features: %d", rc);
        return false;
    }
    return true;
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

int kvm_s390_get_zpci_op(void)
{
    return cap_zpci_op;
}

int kvm_s390_topology_set_mtcr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CPU_TOPOLOGY,
        .attr = attr,
    };

    if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
        return 0;
    }
    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_TOPOLOGY, attr)) {
        return -ENOTSUP;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}