/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "cpu.h"
#include "s390x-internal.h"
#include "kvm_s390x.h"
#include "system/kvm_int.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/units.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "qemu/log.h"
#include "system/system.h"
#include "system/hw_accel.h"
#include "system/runstate.h"
#include "system/device_tree.h"
#include "gdbstub/enums.h"
#include "exec/ram_addr.h"
#include "trace.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-hypercall.h"
#include "target/s390x/kvm/pv.h"
#include CONFIG_DEVICES

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PTF                     0xa2
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_SET_CONTROL_PROGRAM_CODES  0x318
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40
#define ICPT_PV_INSTR                   0x68
#define ICPT_PV_INSTR_NOTIFICATION      0x6c

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
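/*
 * For example, on a machine type with max_cpus = 248 this reserves room
 * for 248 + 32 = 280 struct kvm_s390_irq entries per vCPU (figures are
 * illustrative; the actual max_cpus depends on the machine type).
 */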
#define VCPU_IRQ_BUF_SIZE(max_cpus) (sizeof(struct kvm_s390_irq) * \
                                     (max_cpus + NR_LOCAL_IRQS))
/*
 * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages,
 * as the dirty bitmap must be managed by bitops that take an int as
 * position indicator. This would end at an unaligned address
 * (0x7fffff00000). As future variants might provide larger pages
 * and to make all addresses properly aligned, let us split at 4TB.
 */
#define KVM_SLOT_MAX_BYTES (4UL * TiB)

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_async_pf;
static int cap_mem_op;
static int cap_mem_op_extension;
static int cap_s390_irq;
static int cap_ri;
static int cap_hpage_1m;
static int cap_vcpu_resets;
static int cap_protected;
static int cap_zpci_op;
static int cap_protected_dump;

static bool mem_op_storage_key_support;

static int active_cmma;

static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_cmma_active(void)
{
    return active_cmma;
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (cap_hpage_1m) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with huge memory backings.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;
    trace_kvm_enable_cmma(rc);
}
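/*
 * Crypto setup: the "aes-key-wrap" and "dea-key-wrap" machine properties
 * decide whether the corresponding key-wrapping functions are enabled or
 * disabled for the guest via the KVM_S390_VM_CRYPTO attribute group.
 */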
static void kvm_s390_set_crypto_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr  = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}

void kvm_s390_set_max_pagesize(uint64_t pagesize, Error **errp)
{
    if (pagesize == 4 * KiB) {
        return;
    }

    if (!hpage_1m_allowed()) {
        error_setg(errp, "This QEMU machine does not support huge page "
                   "mappings");
        return;
    }

    if (pagesize != 1 * MiB) {
        error_setg(errp, "Memory backing with 2G pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_HPAGE_1M, 0)) {
        error_setg(errp, "Memory backing with 1M pages was specified, "
                   "but KVM does not support this memory backing");
        return;
    }

    cap_hpage_1m = 1;
}

int kvm_s390_get_hpage_1m(void)
{
    return cap_hpage_1m;
}

static void ccw_machine_class_foreach(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
}

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int required_caps[] = {
        KVM_CAP_DEVICE_CTRL,
        KVM_CAP_SYNC_REGS,
    };

    for (int i = 0; i < ARRAY_SIZE(required_caps); i++) {
        if (!kvm_check_extension(s, required_caps[i])) {
            error_report("KVM is missing capability #%d - "
                         "please use kernel 3.15 or newer", required_caps[i]);
            return -1;
        }
    }

    object_class_foreach(ccw_machine_class_foreach, TYPE_S390_CCW_MACHINE,
                         false, NULL);

    if (!kvm_check_extension(s, KVM_CAP_S390_COW)) {
        error_report("KVM is missing capability KVM_CAP_S390_COW - "
                     "unsupported environment");
        return -1;
    }

    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_mem_op_extension = kvm_check_extension(s, KVM_CAP_S390_MEM_OP_EXTENSION);
    mem_op_storage_key_support = cap_mem_op_extension > 0;
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
    cap_vcpu_resets = kvm_check_extension(s, KVM_CAP_S390_VCPU_RESETS);
    cap_protected = kvm_check_extension(s, KVM_CAP_S390_PROTECTED);
    cap_zpci_op = kvm_check_extension(s, KVM_CAP_S390_ZPCI_OP);
    cap_protected_dump = kvm_check_extension(s, KVM_CAP_S390_PROTECTED_DUMP);
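    /*
     * Opt into optional interfaces below. Return values are deliberately
     * ignored for most of them: a capability that cannot be enabled
     * simply stays off.
     */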
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_CPU_TOPOLOGY, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0);
    if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
        cap_ri = 1;
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary, we only try to enable this for
     * newer machine types if KVM_CAP_S390_AIS_MIGRATION is available.
     */
    if (kvm_kernel_irqchip_allowed() &&
        kvm_check_extension(s, KVM_CAP_S390_AIS_MIGRATION)) {
        kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0);
    }

    kvm_set_max_memslot_size(KVM_SLOT_MAX_BYTES);
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE(max_cpus));
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    g_free(cpu->irqstate);
    cpu->irqstate = NULL;

    return 0;
}

static void kvm_s390_reset_vcpu(S390CPU *cpu, unsigned long type)
{
    CPUState *cs = CPU(cpu);

    /*
     * The reset call is needed here to reset in-kernel vcpu data that
     * we can't access directly from QEMU (i.e. with older kernels
     * which don't support sync_regs/ONE_REG). Before this ioctl
     * cpu_synchronize_state() is called in common kvm code
     * (kvm-all).
     */
    if (kvm_vcpu_ioctl(cs, type)) {
        error_report("CPU reset failed on CPU %i type %lx",
                     cs->cpu_index, type);
    }
}

void kvm_s390_reset_vcpu_initial(S390CPU *cpu)
{
    kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
}

void kvm_s390_reset_vcpu_clear(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_CLEAR_RESET);
    } else {
        kvm_s390_reset_vcpu(cpu, KVM_S390_INITIAL_RESET);
    }
}

void kvm_s390_reset_vcpu_normal(S390CPU *cpu)
{
    if (cap_vcpu_resets) {
        kvm_s390_reset_vcpu(cpu, KVM_S390_NORMAL_RESET);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

#define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \
                                KVM_SYNC_CRS | KVM_SYNC_PREFIX)

int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
    CPUS390XState *env = cpu_env(cs);
    struct kvm_fpu fpu = {};
    int r;
    int i;

    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    memcpy(cs->kvm_run->s.regs.gprs, env->regs, sizeof(cs->kvm_run->s.regs.gprs));
    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
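    /*
     * Floating-point/vector state: prefer the sync-regs interface
     * (vector registers if available, plain FPRs otherwise) and fall
     * back to the KVM_SET_FPU ioctl on kernels that support neither.
     */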
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0];
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1];
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = *get_freg(env, i);
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = *get_freg(env, i);
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /*
     * Access registers, control registers and the prefix - these are
     * always available via kvm_sync_regs in the kernels that we support
     */
    memcpy(cs->kvm_run->s.regs.acrs, env->aregs, sizeof(cs->kvm_run->s.regs.acrs));
    memcpy(cs->kvm_run->s.regs.crs, env->cregs, sizeof(cs->kvm_run->s.regs.crs));
    cs->kvm_run->s.regs.prefix = env->psa;
    cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_PREFIX;

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        cs->kvm_run->s.regs.bpbc = env->bpbc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_BPBC;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        cs->kvm_run->s.regs.etoken = env->etoken;
        cs->kvm_run->s.regs.etoken_extension = env->etoken_extension;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ETOKEN;
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        cs->kvm_run->s.regs.diag318 = env->diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
    }

    return 0;
}
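/*
 * Mirror of kvm_arch_put_registers(): pull the vCPU state that KVM
 * holds back into env after a guest exit.
 */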
int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
    CPUS390XState *env = cpu_env(cs);
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS, ACRS and CRS */
    g_assert(can_sync_regs(cs, KVM_SYNC_REQUIRED_REGS));
    memcpy(env->regs, cs->kvm_run->s.regs.gprs, sizeof(env->regs));
    memcpy(env->aregs, cs->kvm_run->s.regs.acrs, sizeof(env->aregs));
    memcpy(env->cregs, cs->kvm_run->s.regs.crs, sizeof(env->cregs));

    /* The prefix */
    env->psa = cs->kvm_run->s.regs.prefix;

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0] = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1] = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            *get_freg(env, i) = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    if (can_sync_regs(cs, KVM_SYNC_BPBC)) {
        env->bpbc = cs->kvm_run->s.regs.bpbc;
    }

    if (can_sync_regs(cs, KVM_SYNC_ETOKEN)) {
        env->etoken = cs->kvm_run->s.regs.etoken;
        env->etoken_extension = cs->kvm_run->s.regs.etoken_extension;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_DIAG318)) {
        env->diag318_info = cs->kvm_run->s.regs.diag318;
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}
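/*
 * The *_ext variants use KVM_S390_VM_TOD_EXT, which transfers the 8-bit
 * epoch index together with the 64-bit TOD value in a single ioctl
 * instead of two separate attribute accesses.
 */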
int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_s390_vm_tod_clock gtod;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    *tod_high = gtod.epoch_idx;
    *tod_low = gtod.tod;

    return r;
}

int kvm_s390_set_clock(uint8_t tod_high, uint64_t tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)&tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)&tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock_ext(uint8_t tod_high, uint64_t tod_low)
{
    struct kvm_s390_vm_tod_clock gtod = {
        .epoch_idx = tod_high,
        .tod = tod_low,
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_EXT,
        .addr = (uint64_t)&gtod,
    };

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
        .key = (cpu->env.psw.mask & PSW_MASK_KEY) >> PSW_SHIFT_KEY,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }
    if (mem_op_storage_key_support) {
        mem_op.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        warn_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
    }
    return ret;
}
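/*
 * Variant of kvm_s390_mem_op() for protected virtualization guests:
 * their memory is not directly accessible, so data is exchanged through
 * the SIDA (secure instruction data area) at the given offset.
 */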
int kvm_s390_mem_op_pv(S390CPU *cpu, uint64_t offset, void *hostbuf,
                       int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .sida_offset = offset,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_SIDA_WRITE
                       : KVM_S390_MEMOP_SIDA_READ,
        .buf = (uint64_t)hostbuf,
    };
    int ret;

    if (!cap_mem_op || !cap_protected) {
        return -ENOSYS;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_report("KVM_S390_MEM_OP failed: %s", strerror(-ret));
        abort();
    }
    return ret;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        trace_kvm_sw_breakpoint(4);
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        trace_kvm_sw_breakpoint(2);
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}
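/*
 * Removal keeps the breakpoint array dense: the last element is moved
 * into the vacated slot before the allocation is shrunk, because the
 * array is handed to the kernel as-is in kvm_arch_update_guest_debug().
 */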
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints = g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
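/*
 * Inject an interrupt into a single vCPU. Kernels with
 * KVM_CAP_S390_INJECT_IRQ accept the full struct kvm_s390_irq directly;
 * otherwise the payload is down-converted to the legacy format above.
 */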
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

void kvm_s390_floating_interrupt_legacy(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  cpu->env.psw.addr);
    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static void kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                  uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    switch (run->s390_sieic.icptcode) {
    case ICPT_PV_INSTR_NOTIFICATION:
        g_assert(s390_is_pv());
        /* The notification intercepts are currently handled by KVM */
        error_report("unexpected SCLP PV notification");
        exit(1);
        break;
    case ICPT_PV_INSTR:
        g_assert(s390_is_pv());
        sclp_service_call_protected(cpu, sccb, code);
        /* Setting the CC is done by the Ultravisor. */
        break;
    case ICPT_INSTRUCTION:
        g_assert(!s390_is_pv());
        r = sclp_service_call(cpu, sccb, code);
        if (r < 0) {
            kvm_s390_program_interrupt(cpu, -r);
            return;
        }
        setcc(cpu, r);
    }
}
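/*
 * handle_b2() dispatches the privileged 0xb2xx instructions that KVM
 * leaves to user space; most of them are channel-subsystem I/O
 * instructions forwarded to QEMU's css emulation.
 */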
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb, RA_IGNORED);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
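/*
 * Same decoding for RSY-format instructions, which have no index
 * register: the 20-bit signed displacement is assembled from the DL and
 * DH instruction fields and sign-extended via the 0x80000 sign bit.
 */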
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return clp_service_call(cpu, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(cpu, isc, mode);
    if (r) {
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        gaddr = get_base_disp_rsy(cpu, run, &ar);

        return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    if (s390_has_feat(S390_FEAT_ZPCI)) {
        fiba = get_base_disp_rxy(cpu, run, &ar);

        return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
    } else {
        return -1;
    }
}

static void kvm_handle_ptf(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb >> 20) & 0x0f;

    s390_handle_ptf(cpu, r1, RA_IGNORED);
}
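/*
 * The 0xb9xx opcodes handled here are mostly zPCI instructions;
 * returning -1 from any handler makes the caller inject an operation
 * exception into the guest.
 */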
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_PTF:
        kvm_handle_ptf(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        trace_kvm_insn_unhandled_priv(ipbl);
        break;
    }

    return r;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

void kvm_s390_set_diag318(CPUState *cs, uint64_t diag318_info)
{
    CPUS390XState *env = &S390_CPU(cs)->env;

    /* Feat bit is set only if KVM supports sync for diag318 */
    if (s390_has_feat(S390_FEAT_DIAG_318)) {
        env->diag318_info = diag318_info;
        cs->kvm_run->s.regs.diag318 = diag318_info;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
        /*
         * diag 318 info is zeroed during a clear reset and
         * diag 308 IPL subcodes.
         */
    }
}
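/*
 * DIAG 0x318 sets the control program name and version codes. The value
 * is broadcast to every CPU so that each vCPU's sync-regs copy stays
 * consistent, e.g. across migration.
 */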
static void handle_diag_318(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t reg = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t diag318_info = run->s.regs.gprs[reg];
    CPUState *t;

    /*
     * DIAG 318 can only be enabled with KVM support. As such, let's
     * ensure a guest cannot execute this instruction erroneously.
     */
    if (!s390_has_feat(S390_FEAT_DIAG_318)) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return;
    }

    CPU_FOREACH(t) {
        run_on_cpu(t, s390_do_cpu_set_diag318,
                   RUN_ON_CPU_HOST_ULONG(diag318_info));
    }
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_SET_CONTROL_PROGRAM_CODES:
        handle_diag_318(cpu, run);
        break;
#ifdef CONFIG_S390_CCW_VIRTIO
    case DIAG_KVM_HYPERCALL:
        handle_diag_500(cpu, RA_IGNORED);
        break;
#endif /* CONFIG_S390_CCW_VIRTIO */
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        trace_kvm_insn_diag(func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;

    /* get order code */
    order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK;

    ret = handle_sigp(env, order, r1, r3);
    setcc(cpu, ret);
    return 0;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    trace_kvm_insn(run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}

static void unmanageable_intercept(S390CPU *cpu, S390CrashReason reason,
                                   int pswoffset)
{
    CPUState *cs = CPU(cpu);

    s390_cpu_halt(cpu);
    cpu->env.crash_reason = reason;
    qemu_system_guest_panicked(cpu_get_crash_info(cs));
}
/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state. In that case go into crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, S390_CRASH_REASON_OPINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    trace_kvm_intercept(icpt_code, (long)run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
    case ICPT_PV_INSTR:
    case ICPT_PV_INSTR_NOTIFICATION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, S390_CRASH_REASON_PGMINT_LOOP,
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, S390_CRASH_REASON_EXTINT_LOOP,
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        s390_handle_wait(cpu);
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        do_stop_interrupt(&cpu->env);
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}
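/*
 * TSCH is surfaced by KVM as a dedicated KVM_EXIT_S390_TSCH exit. If
 * the in-kernel handler already dequeued an I/O interrupt but the
 * emulation fails, that interrupt has to be re-queued so it isn't lost.
 */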
static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
                             RA_IGNORED);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            s390_io_interrupt(run->s390_tsch.subchannel_id,
                              run->s390_tsch.subchannel_nr,
                              run->s390_tsch.io_int_parm,
                              run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
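/*
 * STSI 3.2.2 describes the stack of hypervisors: per-level VM names,
 * CPU counts and UUIDs. KVM pre-fills the block; QEMU inserts its own
 * entry (vm[0]) with CPU counts, machine name, extended name and UUID.
 */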
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    const MachineState *ms = MACHINE(qdev_get_machine());
    uint16_t conf_cpus = 0, reserved_cpus = 0;
    SysIB_322 sysib;
    int del, i;

    if (s390_is_pv()) {
        s390_cpu_pv_mem_read(cpu, 0, &sysib, sizeof(sysib));
    } else if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that doesn't provide Extended Names
     * delimits the stack; it is assumed not to be capable of managing
     * Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }

    /* count the cpus and split them into configured and reserved ones */
    for (i = 0; i < ms->possible_cpus->len; i++) {
        if (ms->possible_cpus->cpus[i].cpu) {
            conf_cpus++;
        } else {
            reserved_cpus++;
        }
    }
    sysib.vm[0].total_cpus = conf_cpus + reserved_cpus;
    sysib.vm[0].conf_cpus = conf_cpus;
    sysib.vm[0].reserved_cpus = reserved_cpus;

    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on the QEMU invocation, we go
     * with the same "KVMguest" default that KVM has filled into the short
     * name field.
     */
    strpadcpy((char *)sysib.ext_names[0],
              sizeof(sysib.ext_names[0]),
              qemu_name ?: "KVMguest", '\0');

    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    if (s390_is_pv()) {
        s390_cpu_pv_mem_write(cpu, 0, &sysib, sizeof(sysib));
    } else {
        s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
    }
}

static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    case 15:
        insert_stsi_15_1_x(cpu, run->s390_stsi.sel2, run->s390_stsi.addr,
                           run->s390_stsi.ar, RA_IGNORED);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    bql_lock();

    kvm_cpu_synchronize_state(cs);

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_ipl_reset_request(cs, S390_RESET_REIPL);
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    bql_unlock();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    trace_kvm_assign_subch_ioeventfd(kick.fd, kick.addr, assign,
                                     kick.datamatch);
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_protected_dump(void)
{
    return cap_protected_dump;
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case S390_CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case S390_CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case S390_CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case S390_CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    unsigned int max_cpus = MACHINE(qdev_get_machine())->smp.max_cpus;
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = VCPU_IRQ_BUF_SIZE(max_cpus),
    };
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state = {
        .buf = (uint64_t) cpu->irqstate,
        .len = cpu->irqstate_saved_size,
    };
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}
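/*
 * zPCI devices have no classic MSI-X on s390; MSI routes are instead
 * mapped onto adapter interrupts, described by the summary and
 * indicator addresses/offsets registered for the device.
 */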
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        trace_kvm_msi_route_fixup("no pci device");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        trace_kvm_msi_route_fixup("no zpci device");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    /* s390 maps MSIs via adapter routes (see above); never reached */
    abort();
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions now, provided the corresponding feature that
     * unlocks the respective query function is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    if (test_bit(S390_FEAT_CCF_BASE, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
    }
    return 0;
}

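/*
 * Mirror image of query_cpu_subfunc(): take the subfunction bits that made
 * it into the final CPU model and hand them to the kernel, so the guest's
 * query/test-bit instructions report exactly the configured set.
 */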
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    if (test_bit(S390_FEAT_MSA_EXT_9, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KDSA, prop.kdsa);
    }
    if (test_bit(S390_FEAT_ESORT_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_SORTL, prop.sortl);
    }
    if (test_bit(S390_FEAT_DEFLATE_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_DFLTCC, prop.dfltcc);
    }
    if (test_bit(S390_FEAT_CCF_BASE, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PFCR, prop.pfcr);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static bool ap_available(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO,
                             KVM_S390_VM_CRYPTO_ENABLE_APIE);
}

static bool ap_enabled(const S390FeatBitmap features)
{
    return test_bit(S390_FEAT_AP, features);
}

static bool uv_feat_supported(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST);
}

static int query_uv_feat_guest(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_uv_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
        .addr = (uint64_t) &prop,
    };
    int rc;

    /* AP support check is currently the only user of the UV feature test */
    if (!(uv_feat_supported() && ap_available())) {
        return 0;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    if (prop.ap) {
        set_bit(S390_FEAT_UV_FEAT_AP, features);
    }
    if (prop.ap_intr) {
        set_bit(S390_FEAT_UV_FEAT_AP_INTR, features);
    }

    return 0;
}

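/*
 * Mapping between KVM CPU-feature bit numbers and the corresponding QEMU
 * S390Feat values; used by query_cpu_feat() and configure_cpu_feat() to
 * translate between the two representations in either direction.
 */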
static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
            set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}

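/*
 * Derive the host CPU model: query the machine properties (cpuid, IBC,
 * facility mask) from KVM, merge in the features and subfunctions reported
 * by the kernel, then match the result against QEMU's CPU definitions.
 * On success, @model describes the maximum model this host can run.
 */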
bool kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return false;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return false;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return false;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return false;
    }

    /* PTFF subfunctions might be indicated although kernel support is missing */
    if (!test_bit(S390_FEAT_MULTIPLE_EPOCH, model->features)) {
        clear_bit(S390_FEAT_PTFF_QSIE, model->features);
        clear_bit(S390_FEAT_PTFF_QTOUE, model->features);
        clear_bit(S390_FEAT_PTFF_STOE, model->features);
        clear_bit(S390_FEAT_PTFF_STOUE, model->features);
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* bpb needs kernel support for migration, VSIE and reset */
    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_BPB)) {
        clear_bit(S390_FEAT_BPB, model->features);
    }

    /*
     * If we have support for protected virtualization, indicate
     * the protected virtualization IPL unpack facility.
     */
    if (cap_protected) {
        set_bit(S390_FEAT_UNPACK, model->features);
    }

    /*
     * If we have kernel support for CPU Topology, indicate the
     * configuration-topology facility.
     */
    if (kvm_check_extension(kvm_state, KVM_CAP_S390_CPU_TOPOLOGY)) {
        set_bit(S390_FEAT_CONFIGURATION_TOPOLOGY, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    set_bit(S390_FEAT_ZPCI, model->features);
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return false;
    }
    /* for now, we can only provide the AP feature with HW support */
    if (ap_available()) {
        set_bit(S390_FEAT_AP, model->features);
    }

    /*
     * Extended-Length SCCB is handled entirely within QEMU.
     * For PV guests this is completely fenced by the Ultravisor, as Service
     * Call error checking and STFLE interpretation are handled via SIE.
     */
    set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);

    if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
        set_bit(S390_FEAT_DIAG_318, model->features);
    }

    /* Test for Ultravisor features that influence secure guest behavior */
    query_uv_feat_guest(model->features);

    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
    return true;
}

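/*
 * Forward the Ultravisor guest features selected in the CPU model to the
 * kernel. Currently only the AP-related UV features are forwarded here,
 * matching the query side in query_uv_feat_guest() above.
 */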
static int configure_uv_feat_guest(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_uv_feat uv_feat = {};
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST,
        .addr = (__u64) &uv_feat,
    };

    /* AP support check is currently the only user of the UV feature test */
    if (!(uv_feat_supported() && ap_enabled(features))) {
        return 0;
    }

    if (test_bit(S390_FEAT_UV_FEAT_AP, features)) {
        uv_feat.ap = 1;
    }
    if (test_bit(S390_FEAT_UV_FEAT_AP_INTR, features)) {
        uv_feat.ap_intr = 1;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
}

static void kvm_s390_configure_apie(bool interpret)
{
    uint64_t attr = interpret ? KVM_S390_VM_CRYPTO_ENABLE_APIE :
                                KVM_S390_VM_CRYPTO_DISABLE_APIE;

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_crypto_attr(attr);
    }
}

bool kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return true;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return false;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return false;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return false;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return false;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }

    if (ap_enabled(model->features)) {
        kvm_s390_configure_apie(true);
    }

    /* configure UV-features for the guest indicated via query / test_bit */
    rc = configure_uv_feat_guest(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU UV features: %d", rc);
        return false;
    }
    return true;
}

void kvm_s390_restart_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_stop_interrupt(S390CPU *cpu)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

int kvm_s390_get_zpci_op(void)
{
    return cap_zpci_op;
}

int kvm_s390_topology_set_mtcr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CPU_TOPOLOGY,
        .attr = attr,
    };

    if (!s390_has_feat(S390_FEAT_CONFIGURATION_TOPOLOGY)) {
        return 0;
    }
    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_TOPOLOGY, attr)) {
        return -ENOTSUP;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}