1 /* 2 * QEMU S390x KVM implementation 3 * 4 * Copyright (c) 2009 Alexander Graf <agraf@suse.de> 5 * Copyright IBM Corp. 2012 6 * 7 * This library is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU Lesser General Public 9 * License as published by the Free Software Foundation; either 10 * version 2 of the License, or (at your option) any later version. 11 * 12 * This library is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * Lesser General Public License for more details. 16 * 17 * Contributions after 2012-10-29 are licensed under the terms of the 18 * GNU GPL, version 2 or (at your option) any later version. 19 * 20 * You should have received a copy of the GNU (Lesser) General Public 21 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 22 */ 23 24 #include "qemu/osdep.h" 25 #include <sys/ioctl.h> 26 27 #include <linux/kvm.h> 28 #include <asm/ptrace.h> 29 30 #include "qemu-common.h" 31 #include "cpu.h" 32 #include "internal.h" 33 #include "kvm_s390x.h" 34 #include "qemu/error-report.h" 35 #include "qemu/timer.h" 36 #include "sysemu/sysemu.h" 37 #include "sysemu/hw_accel.h" 38 #include "hw/hw.h" 39 #include "sysemu/device_tree.h" 40 #include "qapi/qmp/qjson.h" 41 #include "exec/gdbstub.h" 42 #include "exec/address-spaces.h" 43 #include "trace.h" 44 #include "qapi-event.h" 45 #include "hw/s390x/s390-pci-inst.h" 46 #include "hw/s390x/s390-pci-bus.h" 47 #include "hw/s390x/ipl.h" 48 #include "hw/s390x/ebcdic.h" 49 #include "exec/memattrs.h" 50 #include "hw/s390x/s390-virtio-ccw.h" 51 #include "hw/s390x/s390-virtio-hcall.h" 52 53 #ifndef DEBUG_KVM 54 #define DEBUG_KVM 0 55 #endif 56 57 #define DPRINTF(fmt, ...) do { \ 58 if (DEBUG_KVM) { \ 59 fprintf(stderr, fmt, ## __VA_ARGS__); \ 60 } \ 61 } while (0) 62 63 #define kvm_vm_check_mem_attr(s, attr) \ 64 kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr) 65 66 #define IPA0_DIAG 0x8300 67 #define IPA0_SIGP 0xae00 68 #define IPA0_B2 0xb200 69 #define IPA0_B9 0xb900 70 #define IPA0_EB 0xeb00 71 #define IPA0_E3 0xe300 72 73 #define PRIV_B2_SCLP_CALL 0x20 74 #define PRIV_B2_CSCH 0x30 75 #define PRIV_B2_HSCH 0x31 76 #define PRIV_B2_MSCH 0x32 77 #define PRIV_B2_SSCH 0x33 78 #define PRIV_B2_STSCH 0x34 79 #define PRIV_B2_TSCH 0x35 80 #define PRIV_B2_TPI 0x36 81 #define PRIV_B2_SAL 0x37 82 #define PRIV_B2_RSCH 0x38 83 #define PRIV_B2_STCRW 0x39 84 #define PRIV_B2_STCPS 0x3a 85 #define PRIV_B2_RCHP 0x3b 86 #define PRIV_B2_SCHM 0x3c 87 #define PRIV_B2_CHSC 0x5f 88 #define PRIV_B2_SIGA 0x74 89 #define PRIV_B2_XSCH 0x76 90 91 #define PRIV_EB_SQBS 0x8a 92 #define PRIV_EB_PCISTB 0xd0 93 #define PRIV_EB_SIC 0xd1 94 95 #define PRIV_B9_EQBS 0x9c 96 #define PRIV_B9_CLP 0xa0 97 #define PRIV_B9_PCISTG 0xd0 98 #define PRIV_B9_PCILG 0xd2 99 #define PRIV_B9_RPCIT 0xd3 100 101 #define PRIV_E3_MPCIFC 0xd0 102 #define PRIV_E3_STPCIFC 0xd4 103 104 #define DIAG_TIMEREVENT 0x288 105 #define DIAG_IPL 0x308 106 #define DIAG_KVM_HYPERCALL 0x500 107 #define DIAG_KVM_BREAKPOINT 0x501 108 109 #define ICPT_INSTRUCTION 0x04 110 #define ICPT_PROGRAM 0x08 111 #define ICPT_EXT_INT 0x14 112 #define ICPT_WAITPSW 0x1c 113 #define ICPT_SOFT_INTERCEPT 0x24 114 #define ICPT_CPU_STOP 0x28 115 #define ICPT_OPEREXC 0x2c 116 #define ICPT_IO 0x40 117 118 #define NR_LOCAL_IRQS 32 119 /* 120 * Needs to be big enough to contain max_cpus emergency signals 121 * and in addition NR_LOCAL_IRQS interrupts 122 */ 123 #define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \ 124 (max_cpus + NR_LOCAL_IRQS)) 125 126 static CPUWatchpoint hw_watchpoint; 127 /* 128 * We don't use a list because this structure is also used to transmit the 129 * hardware breakpoints to the kernel. 130 */ 131 static struct kvm_hw_breakpoint *hw_breakpoints; 132 static int nb_hw_breakpoints; 133 134 const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 135 KVM_CAP_LAST_INFO 136 }; 137 138 static int cap_sync_regs; 139 static int cap_async_pf; 140 static int cap_mem_op; 141 static int cap_s390_irq; 142 static int cap_ri; 143 static int cap_gs; 144 145 static int active_cmma; 146 147 static void *legacy_s390_alloc(size_t size, uint64_t *align); 148 149 static int kvm_s390_query_mem_limit(uint64_t *memory_limit) 150 { 151 struct kvm_device_attr attr = { 152 .group = KVM_S390_VM_MEM_CTRL, 153 .attr = KVM_S390_VM_MEM_LIMIT_SIZE, 154 .addr = (uint64_t) memory_limit, 155 }; 156 157 return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 158 } 159 160 int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit) 161 { 162 int rc; 163 164 struct kvm_device_attr attr = { 165 .group = KVM_S390_VM_MEM_CTRL, 166 .attr = KVM_S390_VM_MEM_LIMIT_SIZE, 167 .addr = (uint64_t) &new_limit, 168 }; 169 170 if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) { 171 return 0; 172 } 173 174 rc = kvm_s390_query_mem_limit(hw_limit); 175 if (rc) { 176 return rc; 177 } else if (*hw_limit < new_limit) { 178 return -E2BIG; 179 } 180 181 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 182 } 183 184 int kvm_s390_cmma_active(void) 185 { 186 return active_cmma; 187 } 188 189 static bool kvm_s390_cmma_available(void) 190 { 191 static bool initialized, value; 192 193 if (!initialized) { 194 initialized = true; 195 value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) && 196 kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA); 197 } 198 return value; 199 } 200 201 void kvm_s390_cmma_reset(void) 202 { 203 int rc; 204 struct kvm_device_attr attr = { 205 .group = KVM_S390_VM_MEM_CTRL, 206 .attr = KVM_S390_VM_MEM_CLR_CMMA, 207 }; 208 209 if (!kvm_s390_cmma_active()) { 210 return; 211 } 212 213 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 214 trace_kvm_clear_cmma(rc); 215 } 216 217 static void kvm_s390_enable_cmma(void) 218 { 219 int rc; 220 struct kvm_device_attr attr = { 221 .group = KVM_S390_VM_MEM_CTRL, 222 .attr = KVM_S390_VM_MEM_ENABLE_CMMA, 223 }; 224 225 if (mem_path) { 226 warn_report("CMM will not be enabled because it is not " 227 "compatible with hugetlbfs."); 228 return; 229 } 230 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 231 active_cmma = !rc; 232 trace_kvm_enable_cmma(rc); 233 } 234 235 static void kvm_s390_set_attr(uint64_t attr) 236 { 237 struct kvm_device_attr attribute = { 238 .group = KVM_S390_VM_CRYPTO, 239 .attr = attr, 240 }; 241 242 int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute); 243 244 if (ret) { 245 error_report("Failed to set crypto device attribute %lu: %s", 246 attr, strerror(-ret)); 247 } 248 } 249 250 static void kvm_s390_init_aes_kw(void) 251 { 252 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW; 253 254 if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap", 255 NULL)) { 256 attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW; 257 } 258 259 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 260 kvm_s390_set_attr(attr); 261 } 262 } 263 264 static void kvm_s390_init_dea_kw(void) 265 { 266 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW; 267 268 if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap", 269 NULL)) { 270 attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW; 271 } 272 273 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) { 274 kvm_s390_set_attr(attr); 275 } 276 } 277 278 void kvm_s390_crypto_reset(void) 279 { 280 if (s390_has_feat(S390_FEAT_MSA_EXT_3)) { 281 kvm_s390_init_aes_kw(); 282 kvm_s390_init_dea_kw(); 283 } 284 } 285 286 int kvm_arch_init(MachineState *ms, KVMState *s) 287 { 288 MachineClass *mc = MACHINE_GET_CLASS(ms); 289 290 mc->default_cpu_type = S390_CPU_TYPE_NAME("host"); 291 cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS); 292 cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF); 293 cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP); 294 cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ); 295 296 if (!kvm_check_extension(s, KVM_CAP_S390_GMAP) 297 || !kvm_check_extension(s, KVM_CAP_S390_COW)) { 298 phys_mem_set_alloc(legacy_s390_alloc); 299 } 300 301 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0); 302 kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0); 303 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0); 304 if (ri_allowed()) { 305 if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) { 306 cap_ri = 1; 307 } 308 } 309 if (cpu_model_allowed()) { 310 if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) { 311 cap_gs = 1; 312 } 313 } 314 315 /* 316 * The migration interface for ais was introduced with kernel 4.13 317 * but the capability itself had been active since 4.12. As migration 318 * support is considered necessary let's disable ais in the 2.10 319 * machine. 320 */ 321 /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */ 322 323 return 0; 324 } 325 326 int kvm_arch_irqchip_create(MachineState *ms, KVMState *s) 327 { 328 return 0; 329 } 330 331 unsigned long kvm_arch_vcpu_id(CPUState *cpu) 332 { 333 return cpu->cpu_index; 334 } 335 336 int kvm_arch_init_vcpu(CPUState *cs) 337 { 338 S390CPU *cpu = S390_CPU(cs); 339 kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state); 340 cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE); 341 return 0; 342 } 343 344 void kvm_s390_reset_vcpu(S390CPU *cpu) 345 { 346 CPUState *cs = CPU(cpu); 347 348 /* The initial reset call is needed here to reset in-kernel 349 * vcpu data that we can't access directly from QEMU 350 * (i.e. with older kernels which don't support sync_regs/ONE_REG). 351 * Before this ioctl cpu_synchronize_state() is called in common kvm 352 * code (kvm-all) */ 353 if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) { 354 error_report("Initial CPU reset failed on CPU %i", cs->cpu_index); 355 } 356 } 357 358 static int can_sync_regs(CPUState *cs, int regs) 359 { 360 return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs; 361 } 362 363 int kvm_arch_put_registers(CPUState *cs, int level) 364 { 365 S390CPU *cpu = S390_CPU(cs); 366 CPUS390XState *env = &cpu->env; 367 struct kvm_sregs sregs; 368 struct kvm_regs regs; 369 struct kvm_fpu fpu = {}; 370 int r; 371 int i; 372 373 /* always save the PSW and the GPRS*/ 374 cs->kvm_run->psw_addr = env->psw.addr; 375 cs->kvm_run->psw_mask = env->psw.mask; 376 377 if (can_sync_regs(cs, KVM_SYNC_GPRS)) { 378 for (i = 0; i < 16; i++) { 379 cs->kvm_run->s.regs.gprs[i] = env->regs[i]; 380 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS; 381 } 382 } else { 383 for (i = 0; i < 16; i++) { 384 regs.gprs[i] = env->regs[i]; 385 } 386 r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s); 387 if (r < 0) { 388 return r; 389 } 390 } 391 392 if (can_sync_regs(cs, KVM_SYNC_VRS)) { 393 for (i = 0; i < 32; i++) { 394 cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll; 395 cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll; 396 } 397 cs->kvm_run->s.regs.fpc = env->fpc; 398 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS; 399 } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { 400 for (i = 0; i < 16; i++) { 401 cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll; 402 } 403 cs->kvm_run->s.regs.fpc = env->fpc; 404 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS; 405 } else { 406 /* Floating point */ 407 for (i = 0; i < 16; i++) { 408 fpu.fprs[i] = get_freg(env, i)->ll; 409 } 410 fpu.fpc = env->fpc; 411 412 r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); 413 if (r < 0) { 414 return r; 415 } 416 } 417 418 /* Do we need to save more than that? */ 419 if (level == KVM_PUT_RUNTIME_STATE) { 420 return 0; 421 } 422 423 if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { 424 cs->kvm_run->s.regs.cputm = env->cputm; 425 cs->kvm_run->s.regs.ckc = env->ckc; 426 cs->kvm_run->s.regs.todpr = env->todpr; 427 cs->kvm_run->s.regs.gbea = env->gbea; 428 cs->kvm_run->s.regs.pp = env->pp; 429 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0; 430 } else { 431 /* 432 * These ONE_REGS are not protected by a capability. As they are only 433 * necessary for migration we just trace a possible error, but don't 434 * return with an error return code. 435 */ 436 kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); 437 kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); 438 kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); 439 kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); 440 kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp); 441 } 442 443 if (can_sync_regs(cs, KVM_SYNC_RICCB)) { 444 memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64); 445 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB; 446 } 447 448 /* pfault parameters */ 449 if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { 450 cs->kvm_run->s.regs.pft = env->pfault_token; 451 cs->kvm_run->s.regs.pfs = env->pfault_select; 452 cs->kvm_run->s.regs.pfc = env->pfault_compare; 453 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT; 454 } else if (cap_async_pf) { 455 r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); 456 if (r < 0) { 457 return r; 458 } 459 r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); 460 if (r < 0) { 461 return r; 462 } 463 r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); 464 if (r < 0) { 465 return r; 466 } 467 } 468 469 /* access registers and control registers*/ 470 if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { 471 for (i = 0; i < 16; i++) { 472 cs->kvm_run->s.regs.acrs[i] = env->aregs[i]; 473 cs->kvm_run->s.regs.crs[i] = env->cregs[i]; 474 } 475 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS; 476 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS; 477 } else { 478 for (i = 0; i < 16; i++) { 479 sregs.acrs[i] = env->aregs[i]; 480 sregs.crs[i] = env->cregs[i]; 481 } 482 r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs); 483 if (r < 0) { 484 return r; 485 } 486 } 487 488 if (can_sync_regs(cs, KVM_SYNC_GSCB)) { 489 memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32); 490 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB; 491 } 492 493 /* Finally the prefix */ 494 if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { 495 cs->kvm_run->s.regs.prefix = env->psa; 496 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX; 497 } else { 498 /* prefix is only supported via sync regs */ 499 } 500 return 0; 501 } 502 503 int kvm_arch_get_registers(CPUState *cs) 504 { 505 S390CPU *cpu = S390_CPU(cs); 506 CPUS390XState *env = &cpu->env; 507 struct kvm_sregs sregs; 508 struct kvm_regs regs; 509 struct kvm_fpu fpu; 510 int i, r; 511 512 /* get the PSW */ 513 env->psw.addr = cs->kvm_run->psw_addr; 514 env->psw.mask = cs->kvm_run->psw_mask; 515 516 /* the GPRS */ 517 if (can_sync_regs(cs, KVM_SYNC_GPRS)) { 518 for (i = 0; i < 16; i++) { 519 env->regs[i] = cs->kvm_run->s.regs.gprs[i]; 520 } 521 } else { 522 r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s); 523 if (r < 0) { 524 return r; 525 } 526 for (i = 0; i < 16; i++) { 527 env->regs[i] = regs.gprs[i]; 528 } 529 } 530 531 /* The ACRS and CRS */ 532 if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) { 533 for (i = 0; i < 16; i++) { 534 env->aregs[i] = cs->kvm_run->s.regs.acrs[i]; 535 env->cregs[i] = cs->kvm_run->s.regs.crs[i]; 536 } 537 } else { 538 r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs); 539 if (r < 0) { 540 return r; 541 } 542 for (i = 0; i < 16; i++) { 543 env->aregs[i] = sregs.acrs[i]; 544 env->cregs[i] = sregs.crs[i]; 545 } 546 } 547 548 /* Floating point and vector registers */ 549 if (can_sync_regs(cs, KVM_SYNC_VRS)) { 550 for (i = 0; i < 32; i++) { 551 env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0]; 552 env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1]; 553 } 554 env->fpc = cs->kvm_run->s.regs.fpc; 555 } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) { 556 for (i = 0; i < 16; i++) { 557 get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i]; 558 } 559 env->fpc = cs->kvm_run->s.regs.fpc; 560 } else { 561 r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); 562 if (r < 0) { 563 return r; 564 } 565 for (i = 0; i < 16; i++) { 566 get_freg(env, i)->ll = fpu.fprs[i]; 567 } 568 env->fpc = fpu.fpc; 569 } 570 571 /* The prefix */ 572 if (can_sync_regs(cs, KVM_SYNC_PREFIX)) { 573 env->psa = cs->kvm_run->s.regs.prefix; 574 } 575 576 if (can_sync_regs(cs, KVM_SYNC_ARCH0)) { 577 env->cputm = cs->kvm_run->s.regs.cputm; 578 env->ckc = cs->kvm_run->s.regs.ckc; 579 env->todpr = cs->kvm_run->s.regs.todpr; 580 env->gbea = cs->kvm_run->s.regs.gbea; 581 env->pp = cs->kvm_run->s.regs.pp; 582 } else { 583 /* 584 * These ONE_REGS are not protected by a capability. As they are only 585 * necessary for migration we just trace a possible error, but don't 586 * return with an error return code. 587 */ 588 kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm); 589 kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc); 590 kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr); 591 kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea); 592 kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp); 593 } 594 595 if (can_sync_regs(cs, KVM_SYNC_RICCB)) { 596 memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64); 597 } 598 599 if (can_sync_regs(cs, KVM_SYNC_GSCB)) { 600 memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32); 601 } 602 603 /* pfault parameters */ 604 if (can_sync_regs(cs, KVM_SYNC_PFAULT)) { 605 env->pfault_token = cs->kvm_run->s.regs.pft; 606 env->pfault_select = cs->kvm_run->s.regs.pfs; 607 env->pfault_compare = cs->kvm_run->s.regs.pfc; 608 } else if (cap_async_pf) { 609 r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token); 610 if (r < 0) { 611 return r; 612 } 613 r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare); 614 if (r < 0) { 615 return r; 616 } 617 r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select); 618 if (r < 0) { 619 return r; 620 } 621 } 622 623 return 0; 624 } 625 626 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low) 627 { 628 int r; 629 struct kvm_device_attr attr = { 630 .group = KVM_S390_VM_TOD, 631 .attr = KVM_S390_VM_TOD_LOW, 632 .addr = (uint64_t)tod_low, 633 }; 634 635 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 636 if (r) { 637 return r; 638 } 639 640 attr.attr = KVM_S390_VM_TOD_HIGH; 641 attr.addr = (uint64_t)tod_high; 642 return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 643 } 644 645 int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low) 646 { 647 int r; 648 struct kvm_s390_vm_tod_clock gtod; 649 struct kvm_device_attr attr = { 650 .group = KVM_S390_VM_TOD, 651 .attr = KVM_S390_VM_TOD_EXT, 652 .addr = (uint64_t)>od, 653 }; 654 655 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 656 *tod_high = gtod.epoch_idx; 657 *tod_low = gtod.tod; 658 659 return r; 660 } 661 662 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low) 663 { 664 int r; 665 struct kvm_device_attr attr = { 666 .group = KVM_S390_VM_TOD, 667 .attr = KVM_S390_VM_TOD_LOW, 668 .addr = (uint64_t)tod_low, 669 }; 670 671 r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 672 if (r) { 673 return r; 674 } 675 676 attr.attr = KVM_S390_VM_TOD_HIGH; 677 attr.addr = (uint64_t)tod_high; 678 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 679 } 680 681 int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_low) 682 { 683 struct kvm_s390_vm_tod_clock gtod = { 684 .epoch_idx = *tod_high, 685 .tod = *tod_low, 686 }; 687 struct kvm_device_attr attr = { 688 .group = KVM_S390_VM_TOD, 689 .attr = KVM_S390_VM_TOD_EXT, 690 .addr = (uint64_t)>od, 691 }; 692 693 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 694 } 695 696 /** 697 * kvm_s390_mem_op: 698 * @addr: the logical start address in guest memory 699 * @ar: the access register number 700 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying 701 * @len: length that should be transferred 702 * @is_write: true = write, false = read 703 * Returns: 0 on success, non-zero if an exception or error occurred 704 * 705 * Use KVM ioctl to read/write from/to guest memory. An access exception 706 * is injected into the vCPU in case of translation errors. 707 */ 708 int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf, 709 int len, bool is_write) 710 { 711 struct kvm_s390_mem_op mem_op = { 712 .gaddr = addr, 713 .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION, 714 .size = len, 715 .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE 716 : KVM_S390_MEMOP_LOGICAL_READ, 717 .buf = (uint64_t)hostbuf, 718 .ar = ar, 719 }; 720 int ret; 721 722 if (!cap_mem_op) { 723 return -ENOSYS; 724 } 725 if (!hostbuf) { 726 mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY; 727 } 728 729 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op); 730 if (ret < 0) { 731 error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret)); 732 } 733 return ret; 734 } 735 736 /* 737 * Legacy layout for s390: 738 * Older S390 KVM requires the topmost vma of the RAM to be 739 * smaller than an system defined value, which is at least 256GB. 740 * Larger systems have larger values. We put the guest between 741 * the end of data segment (system break) and this value. We 742 * use 32GB as a base to have enough room for the system break 743 * to grow. We also have to use MAP parameters that avoid 744 * read-only mapping of guest pages. 745 */ 746 static void *legacy_s390_alloc(size_t size, uint64_t *align) 747 { 748 void *mem; 749 750 mem = mmap((void *) 0x800000000ULL, size, 751 PROT_EXEC|PROT_READ|PROT_WRITE, 752 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); 753 return mem == MAP_FAILED ? NULL : mem; 754 } 755 756 static uint8_t const *sw_bp_inst; 757 static uint8_t sw_bp_ilen; 758 759 static void determine_sw_breakpoint_instr(void) 760 { 761 /* DIAG 501 is used for sw breakpoints with old kernels */ 762 static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01}; 763 /* Instruction 0x0000 is used for sw breakpoints with recent kernels */ 764 static const uint8_t instr_0x0000[] = {0x00, 0x00}; 765 766 if (sw_bp_inst) { 767 return; 768 } 769 if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) { 770 sw_bp_inst = diag_501; 771 sw_bp_ilen = sizeof(diag_501); 772 DPRINTF("KVM: will use 4-byte sw breakpoints.\n"); 773 } else { 774 sw_bp_inst = instr_0x0000; 775 sw_bp_ilen = sizeof(instr_0x0000); 776 DPRINTF("KVM: will use 2-byte sw breakpoints.\n"); 777 } 778 } 779 780 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 781 { 782 determine_sw_breakpoint_instr(); 783 784 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 785 sw_bp_ilen, 0) || 786 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) { 787 return -EINVAL; 788 } 789 return 0; 790 } 791 792 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) 793 { 794 uint8_t t[MAX_ILEN]; 795 796 if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) { 797 return -EINVAL; 798 } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) { 799 return -EINVAL; 800 } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 801 sw_bp_ilen, 1)) { 802 return -EINVAL; 803 } 804 805 return 0; 806 } 807 808 static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, 809 int len, int type) 810 { 811 int n; 812 813 for (n = 0; n < nb_hw_breakpoints; n++) { 814 if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type && 815 (hw_breakpoints[n].len == len || len == -1)) { 816 return &hw_breakpoints[n]; 817 } 818 } 819 820 return NULL; 821 } 822 823 static int insert_hw_breakpoint(target_ulong addr, int len, int type) 824 { 825 int size; 826 827 if (find_hw_breakpoint(addr, len, type)) { 828 return -EEXIST; 829 } 830 831 size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint); 832 833 if (!hw_breakpoints) { 834 nb_hw_breakpoints = 0; 835 hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size); 836 } else { 837 hw_breakpoints = 838 (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size); 839 } 840 841 if (!hw_breakpoints) { 842 nb_hw_breakpoints = 0; 843 return -ENOMEM; 844 } 845 846 hw_breakpoints[nb_hw_breakpoints].addr = addr; 847 hw_breakpoints[nb_hw_breakpoints].len = len; 848 hw_breakpoints[nb_hw_breakpoints].type = type; 849 850 nb_hw_breakpoints++; 851 852 return 0; 853 } 854 855 int kvm_arch_insert_hw_breakpoint(target_ulong addr, 856 target_ulong len, int type) 857 { 858 switch (type) { 859 case GDB_BREAKPOINT_HW: 860 type = KVM_HW_BP; 861 break; 862 case GDB_WATCHPOINT_WRITE: 863 if (len < 1) { 864 return -EINVAL; 865 } 866 type = KVM_HW_WP_WRITE; 867 break; 868 default: 869 return -ENOSYS; 870 } 871 return insert_hw_breakpoint(addr, len, type); 872 } 873 874 int kvm_arch_remove_hw_breakpoint(target_ulong addr, 875 target_ulong len, int type) 876 { 877 int size; 878 struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type); 879 880 if (bp == NULL) { 881 return -ENOENT; 882 } 883 884 nb_hw_breakpoints--; 885 if (nb_hw_breakpoints > 0) { 886 /* 887 * In order to trim the array, move the last element to the position to 888 * be removed - if necessary. 889 */ 890 if (bp != &hw_breakpoints[nb_hw_breakpoints]) { 891 *bp = hw_breakpoints[nb_hw_breakpoints]; 892 } 893 size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); 894 hw_breakpoints = 895 (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); 896 } else { 897 g_free(hw_breakpoints); 898 hw_breakpoints = NULL; 899 } 900 901 return 0; 902 } 903 904 void kvm_arch_remove_all_hw_breakpoints(void) 905 { 906 nb_hw_breakpoints = 0; 907 g_free(hw_breakpoints); 908 hw_breakpoints = NULL; 909 } 910 911 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) 912 { 913 int i; 914 915 if (nb_hw_breakpoints > 0) { 916 dbg->arch.nr_hw_bp = nb_hw_breakpoints; 917 dbg->arch.hw_bp = hw_breakpoints; 918 919 for (i = 0; i < nb_hw_breakpoints; ++i) { 920 hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu, 921 hw_breakpoints[i].addr); 922 } 923 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; 924 } else { 925 dbg->arch.nr_hw_bp = 0; 926 dbg->arch.hw_bp = NULL; 927 } 928 } 929 930 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) 931 { 932 } 933 934 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) 935 { 936 return MEMTXATTRS_UNSPECIFIED; 937 } 938 939 int kvm_arch_process_async_events(CPUState *cs) 940 { 941 return cs->halted; 942 } 943 944 static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq, 945 struct kvm_s390_interrupt *interrupt) 946 { 947 int r = 0; 948 949 interrupt->type = irq->type; 950 switch (irq->type) { 951 case KVM_S390_INT_VIRTIO: 952 interrupt->parm = irq->u.ext.ext_params; 953 /* fall through */ 954 case KVM_S390_INT_PFAULT_INIT: 955 case KVM_S390_INT_PFAULT_DONE: 956 interrupt->parm64 = irq->u.ext.ext_params2; 957 break; 958 case KVM_S390_PROGRAM_INT: 959 interrupt->parm = irq->u.pgm.code; 960 break; 961 case KVM_S390_SIGP_SET_PREFIX: 962 interrupt->parm = irq->u.prefix.address; 963 break; 964 case KVM_S390_INT_SERVICE: 965 interrupt->parm = irq->u.ext.ext_params; 966 break; 967 case KVM_S390_MCHK: 968 interrupt->parm = irq->u.mchk.cr14; 969 interrupt->parm64 = irq->u.mchk.mcic; 970 break; 971 case KVM_S390_INT_EXTERNAL_CALL: 972 interrupt->parm = irq->u.extcall.code; 973 break; 974 case KVM_S390_INT_EMERGENCY: 975 interrupt->parm = irq->u.emerg.code; 976 break; 977 case KVM_S390_SIGP_STOP: 978 case KVM_S390_RESTART: 979 break; /* These types have no parameters */ 980 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 981 interrupt->parm = irq->u.io.subchannel_id << 16; 982 interrupt->parm |= irq->u.io.subchannel_nr; 983 interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32; 984 interrupt->parm64 |= irq->u.io.io_int_word; 985 break; 986 default: 987 r = -EINVAL; 988 break; 989 } 990 return r; 991 } 992 993 static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq) 994 { 995 struct kvm_s390_interrupt kvmint = {}; 996 int r; 997 998 r = s390_kvm_irq_to_interrupt(irq, &kvmint); 999 if (r < 0) { 1000 fprintf(stderr, "%s called with bogus interrupt\n", __func__); 1001 exit(1); 1002 } 1003 1004 r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint); 1005 if (r < 0) { 1006 fprintf(stderr, "KVM failed to inject interrupt\n"); 1007 exit(1); 1008 } 1009 } 1010 1011 void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq) 1012 { 1013 CPUState *cs = CPU(cpu); 1014 int r; 1015 1016 if (cap_s390_irq) { 1017 r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq); 1018 if (!r) { 1019 return; 1020 } 1021 error_report("KVM failed to inject interrupt %llx", irq->type); 1022 exit(1); 1023 } 1024 1025 inject_vcpu_irq_legacy(cs, irq); 1026 } 1027 1028 static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq) 1029 { 1030 struct kvm_s390_interrupt kvmint = {}; 1031 int r; 1032 1033 r = s390_kvm_irq_to_interrupt(irq, &kvmint); 1034 if (r < 0) { 1035 fprintf(stderr, "%s called with bogus interrupt\n", __func__); 1036 exit(1); 1037 } 1038 1039 r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint); 1040 if (r < 0) { 1041 fprintf(stderr, "KVM failed to inject interrupt\n"); 1042 exit(1); 1043 } 1044 } 1045 1046 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq) 1047 { 1048 static bool use_flic = true; 1049 int r; 1050 1051 if (use_flic) { 1052 r = kvm_s390_inject_flic(irq); 1053 if (r == -ENOSYS) { 1054 use_flic = false; 1055 } 1056 if (!r) { 1057 return; 1058 } 1059 } 1060 __kvm_s390_floating_interrupt(irq); 1061 } 1062 1063 void kvm_s390_service_interrupt(uint32_t parm) 1064 { 1065 struct kvm_s390_irq irq = { 1066 .type = KVM_S390_INT_SERVICE, 1067 .u.ext.ext_params = parm, 1068 }; 1069 1070 kvm_s390_floating_interrupt(&irq); 1071 } 1072 1073 void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code) 1074 { 1075 struct kvm_s390_irq irq = { 1076 .type = KVM_S390_PROGRAM_INT, 1077 .u.pgm.code = code, 1078 }; 1079 1080 kvm_s390_vcpu_interrupt(cpu, &irq); 1081 } 1082 1083 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code) 1084 { 1085 struct kvm_s390_irq irq = { 1086 .type = KVM_S390_PROGRAM_INT, 1087 .u.pgm.code = code, 1088 .u.pgm.trans_exc_code = te_code, 1089 .u.pgm.exc_access_id = te_code & 3, 1090 }; 1091 1092 kvm_s390_vcpu_interrupt(cpu, &irq); 1093 } 1094 1095 static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run, 1096 uint16_t ipbh0) 1097 { 1098 CPUS390XState *env = &cpu->env; 1099 uint64_t sccb; 1100 uint32_t code; 1101 int r = 0; 1102 1103 cpu_synchronize_state(CPU(cpu)); 1104 sccb = env->regs[ipbh0 & 0xf]; 1105 code = env->regs[(ipbh0 & 0xf0) >> 4]; 1106 1107 r = sclp_service_call(env, sccb, code); 1108 if (r < 0) { 1109 kvm_s390_program_interrupt(cpu, -r); 1110 } else { 1111 setcc(cpu, r); 1112 } 1113 1114 return 0; 1115 } 1116 1117 static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) 1118 { 1119 CPUS390XState *env = &cpu->env; 1120 int rc = 0; 1121 uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16; 1122 1123 cpu_synchronize_state(CPU(cpu)); 1124 1125 switch (ipa1) { 1126 case PRIV_B2_XSCH: 1127 ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED); 1128 break; 1129 case PRIV_B2_CSCH: 1130 ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED); 1131 break; 1132 case PRIV_B2_HSCH: 1133 ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED); 1134 break; 1135 case PRIV_B2_MSCH: 1136 ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); 1137 break; 1138 case PRIV_B2_SSCH: 1139 ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); 1140 break; 1141 case PRIV_B2_STCRW: 1142 ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED); 1143 break; 1144 case PRIV_B2_STSCH: 1145 ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED); 1146 break; 1147 case PRIV_B2_TSCH: 1148 /* We should only get tsch via KVM_EXIT_S390_TSCH. */ 1149 fprintf(stderr, "Spurious tsch intercept\n"); 1150 break; 1151 case PRIV_B2_CHSC: 1152 ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED); 1153 break; 1154 case PRIV_B2_TPI: 1155 /* This should have been handled by kvm already. */ 1156 fprintf(stderr, "Spurious tpi intercept\n"); 1157 break; 1158 case PRIV_B2_SCHM: 1159 ioinst_handle_schm(cpu, env->regs[1], env->regs[2], 1160 run->s390_sieic.ipb, RA_IGNORED); 1161 break; 1162 case PRIV_B2_RSCH: 1163 ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED); 1164 break; 1165 case PRIV_B2_RCHP: 1166 ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED); 1167 break; 1168 case PRIV_B2_STCPS: 1169 /* We do not provide this instruction, it is suppressed. */ 1170 break; 1171 case PRIV_B2_SAL: 1172 ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED); 1173 break; 1174 case PRIV_B2_SIGA: 1175 /* Not provided, set CC = 3 for subchannel not operational */ 1176 setcc(cpu, 3); 1177 break; 1178 case PRIV_B2_SCLP_CALL: 1179 rc = kvm_sclp_service_call(cpu, run, ipbh0); 1180 break; 1181 default: 1182 rc = -1; 1183 DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1); 1184 break; 1185 } 1186 1187 return rc; 1188 } 1189 1190 static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run, 1191 uint8_t *ar) 1192 { 1193 CPUS390XState *env = &cpu->env; 1194 uint32_t x2 = (run->s390_sieic.ipa & 0x000f); 1195 uint32_t base2 = run->s390_sieic.ipb >> 28; 1196 uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + 1197 ((run->s390_sieic.ipb & 0xff00) << 4); 1198 1199 if (disp2 & 0x80000) { 1200 disp2 += 0xfff00000; 1201 } 1202 if (ar) { 1203 *ar = base2; 1204 } 1205 1206 return (base2 ? env->regs[base2] : 0) + 1207 (x2 ? env->regs[x2] : 0) + (long)(int)disp2; 1208 } 1209 1210 static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run, 1211 uint8_t *ar) 1212 { 1213 CPUS390XState *env = &cpu->env; 1214 uint32_t base2 = run->s390_sieic.ipb >> 28; 1215 uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) + 1216 ((run->s390_sieic.ipb & 0xff00) << 4); 1217 1218 if (disp2 & 0x80000) { 1219 disp2 += 0xfff00000; 1220 } 1221 if (ar) { 1222 *ar = base2; 1223 } 1224 1225 return (base2 ? env->regs[base2] : 0) + (long)(int)disp2; 1226 } 1227 1228 static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run) 1229 { 1230 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; 1231 1232 if (s390_has_feat(S390_FEAT_ZPCI)) { 1233 return clp_service_call(cpu, r2, RA_IGNORED); 1234 } else { 1235 return -1; 1236 } 1237 } 1238 1239 static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run) 1240 { 1241 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; 1242 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; 1243 1244 if (s390_has_feat(S390_FEAT_ZPCI)) { 1245 return pcilg_service_call(cpu, r1, r2, RA_IGNORED); 1246 } else { 1247 return -1; 1248 } 1249 } 1250 1251 static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run) 1252 { 1253 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; 1254 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; 1255 1256 if (s390_has_feat(S390_FEAT_ZPCI)) { 1257 return pcistg_service_call(cpu, r1, r2, RA_IGNORED); 1258 } else { 1259 return -1; 1260 } 1261 } 1262 1263 static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run) 1264 { 1265 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1266 uint64_t fiba; 1267 uint8_t ar; 1268 1269 if (s390_has_feat(S390_FEAT_ZPCI)) { 1270 cpu_synchronize_state(CPU(cpu)); 1271 fiba = get_base_disp_rxy(cpu, run, &ar); 1272 1273 return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); 1274 } else { 1275 return -1; 1276 } 1277 } 1278 1279 static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run) 1280 { 1281 CPUS390XState *env = &cpu->env; 1282 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1283 uint8_t r3 = run->s390_sieic.ipa & 0x000f; 1284 uint8_t isc; 1285 uint16_t mode; 1286 int r; 1287 1288 cpu_synchronize_state(CPU(cpu)); 1289 mode = env->regs[r1] & 0xffff; 1290 isc = (env->regs[r3] >> 27) & 0x7; 1291 r = css_do_sic(env, isc, mode); 1292 if (r) { 1293 kvm_s390_program_interrupt(cpu, -r); 1294 } 1295 1296 return 0; 1297 } 1298 1299 static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run) 1300 { 1301 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20; 1302 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16; 1303 1304 if (s390_has_feat(S390_FEAT_ZPCI)) { 1305 return rpcit_service_call(cpu, r1, r2, RA_IGNORED); 1306 } else { 1307 return -1; 1308 } 1309 } 1310 1311 static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run) 1312 { 1313 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1314 uint8_t r3 = run->s390_sieic.ipa & 0x000f; 1315 uint64_t gaddr; 1316 uint8_t ar; 1317 1318 if (s390_has_feat(S390_FEAT_ZPCI)) { 1319 cpu_synchronize_state(CPU(cpu)); 1320 gaddr = get_base_disp_rsy(cpu, run, &ar); 1321 1322 return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED); 1323 } else { 1324 return -1; 1325 } 1326 } 1327 1328 static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run) 1329 { 1330 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1331 uint64_t fiba; 1332 uint8_t ar; 1333 1334 if (s390_has_feat(S390_FEAT_ZPCI)) { 1335 cpu_synchronize_state(CPU(cpu)); 1336 fiba = get_base_disp_rxy(cpu, run, &ar); 1337 1338 return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED); 1339 } else { 1340 return -1; 1341 } 1342 } 1343 1344 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) 1345 { 1346 int r = 0; 1347 1348 switch (ipa1) { 1349 case PRIV_B9_CLP: 1350 r = kvm_clp_service_call(cpu, run); 1351 break; 1352 case PRIV_B9_PCISTG: 1353 r = kvm_pcistg_service_call(cpu, run); 1354 break; 1355 case PRIV_B9_PCILG: 1356 r = kvm_pcilg_service_call(cpu, run); 1357 break; 1358 case PRIV_B9_RPCIT: 1359 r = kvm_rpcit_service_call(cpu, run); 1360 break; 1361 case PRIV_B9_EQBS: 1362 /* just inject exception */ 1363 r = -1; 1364 break; 1365 default: 1366 r = -1; 1367 DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1); 1368 break; 1369 } 1370 1371 return r; 1372 } 1373 1374 static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) 1375 { 1376 int r = 0; 1377 1378 switch (ipbl) { 1379 case PRIV_EB_PCISTB: 1380 r = kvm_pcistb_service_call(cpu, run); 1381 break; 1382 case PRIV_EB_SIC: 1383 r = kvm_sic_service_call(cpu, run); 1384 break; 1385 case PRIV_EB_SQBS: 1386 /* just inject exception */ 1387 r = -1; 1388 break; 1389 default: 1390 r = -1; 1391 DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl); 1392 break; 1393 } 1394 1395 return r; 1396 } 1397 1398 static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl) 1399 { 1400 int r = 0; 1401 1402 switch (ipbl) { 1403 case PRIV_E3_MPCIFC: 1404 r = kvm_mpcifc_service_call(cpu, run); 1405 break; 1406 case PRIV_E3_STPCIFC: 1407 r = kvm_stpcifc_service_call(cpu, run); 1408 break; 1409 default: 1410 r = -1; 1411 DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl); 1412 break; 1413 } 1414 1415 return r; 1416 } 1417 1418 static int handle_hypercall(S390CPU *cpu, struct kvm_run *run) 1419 { 1420 CPUS390XState *env = &cpu->env; 1421 int ret; 1422 1423 cpu_synchronize_state(CPU(cpu)); 1424 ret = s390_virtio_hypercall(env); 1425 if (ret == -EINVAL) { 1426 kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); 1427 return 0; 1428 } 1429 1430 return ret; 1431 } 1432 1433 static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run) 1434 { 1435 uint64_t r1, r3; 1436 int rc; 1437 1438 cpu_synchronize_state(CPU(cpu)); 1439 r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1440 r3 = run->s390_sieic.ipa & 0x000f; 1441 rc = handle_diag_288(&cpu->env, r1, r3); 1442 if (rc) { 1443 kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); 1444 } 1445 } 1446 1447 static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run) 1448 { 1449 uint64_t r1, r3; 1450 1451 cpu_synchronize_state(CPU(cpu)); 1452 r1 = (run->s390_sieic.ipa & 0x00f0) >> 4; 1453 r3 = run->s390_sieic.ipa & 0x000f; 1454 handle_diag_308(&cpu->env, r1, r3, RA_IGNORED); 1455 } 1456 1457 static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run) 1458 { 1459 CPUS390XState *env = &cpu->env; 1460 unsigned long pc; 1461 1462 cpu_synchronize_state(CPU(cpu)); 1463 1464 pc = env->psw.addr - sw_bp_ilen; 1465 if (kvm_find_sw_breakpoint(CPU(cpu), pc)) { 1466 env->psw.addr = pc; 1467 return EXCP_DEBUG; 1468 } 1469 1470 return -ENOENT; 1471 } 1472 1473 #define DIAG_KVM_CODE_MASK 0x000000000000ffff 1474 1475 static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) 1476 { 1477 int r = 0; 1478 uint16_t func_code; 1479 1480 /* 1481 * For any diagnose call we support, bits 48-63 of the resulting 1482 * address specify the function code; the remainder is ignored. 1483 */ 1484 func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK; 1485 switch (func_code) { 1486 case DIAG_TIMEREVENT: 1487 kvm_handle_diag_288(cpu, run); 1488 break; 1489 case DIAG_IPL: 1490 kvm_handle_diag_308(cpu, run); 1491 break; 1492 case DIAG_KVM_HYPERCALL: 1493 r = handle_hypercall(cpu, run); 1494 break; 1495 case DIAG_KVM_BREAKPOINT: 1496 r = handle_sw_breakpoint(cpu, run); 1497 break; 1498 default: 1499 DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code); 1500 kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION); 1501 break; 1502 } 1503 1504 return r; 1505 } 1506 1507 static int kvm_s390_handle_sigp(S390CPU *cpu, uint8_t ipa1, uint32_t ipb) 1508 { 1509 CPUS390XState *env = &cpu->env; 1510 const uint8_t r1 = ipa1 >> 4; 1511 const uint8_t r3 = ipa1 & 0x0f; 1512 int ret; 1513 uint8_t order; 1514 1515 cpu_synchronize_state(CPU(cpu)); 1516 1517 /* get order code */ 1518 order = decode_basedisp_rs(env, ipb, NULL) & SIGP_ORDER_MASK; 1519 1520 ret = handle_sigp(env, order, r1, r3); 1521 setcc(cpu, ret); 1522 return 0; 1523 } 1524 1525 static int handle_instruction(S390CPU *cpu, struct kvm_run *run) 1526 { 1527 unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00); 1528 uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff; 1529 int r = -1; 1530 1531 DPRINTF("handle_instruction 0x%x 0x%x\n", 1532 run->s390_sieic.ipa, run->s390_sieic.ipb); 1533 switch (ipa0) { 1534 case IPA0_B2: 1535 r = handle_b2(cpu, run, ipa1); 1536 break; 1537 case IPA0_B9: 1538 r = handle_b9(cpu, run, ipa1); 1539 break; 1540 case IPA0_EB: 1541 r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff); 1542 break; 1543 case IPA0_E3: 1544 r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff); 1545 break; 1546 case IPA0_DIAG: 1547 r = handle_diag(cpu, run, run->s390_sieic.ipb); 1548 break; 1549 case IPA0_SIGP: 1550 r = kvm_s390_handle_sigp(cpu, ipa1, run->s390_sieic.ipb); 1551 break; 1552 } 1553 1554 if (r < 0) { 1555 r = 0; 1556 kvm_s390_program_interrupt(cpu, PGM_OPERATION); 1557 } 1558 1559 return r; 1560 } 1561 1562 static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset) 1563 { 1564 CPUState *cs = CPU(cpu); 1565 1566 error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx", 1567 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset), 1568 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8)); 1569 s390_cpu_halt(cpu); 1570 qemu_system_guest_panicked(NULL); 1571 } 1572 1573 /* try to detect pgm check loops */ 1574 static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run) 1575 { 1576 CPUState *cs = CPU(cpu); 1577 PSW oldpsw, newpsw; 1578 1579 cpu_synchronize_state(cs); 1580 newpsw.mask = ldq_phys(cs->as, cpu->env.psa + 1581 offsetof(LowCore, program_new_psw)); 1582 newpsw.addr = ldq_phys(cs->as, cpu->env.psa + 1583 offsetof(LowCore, program_new_psw) + 8); 1584 oldpsw.mask = run->psw_mask; 1585 oldpsw.addr = run->psw_addr; 1586 /* 1587 * Avoid endless loops of operation exceptions, if the pgm new 1588 * PSW will cause a new operation exception. 1589 * The heuristic checks if the pgm new psw is within 6 bytes before 1590 * the faulting psw address (with same DAT, AS settings) and the 1591 * new psw is not a wait psw and the fault was not triggered by 1592 * problem state. In that case go into crashed state. 1593 */ 1594 1595 if (oldpsw.addr - newpsw.addr <= 6 && 1596 !(newpsw.mask & PSW_MASK_WAIT) && 1597 !(oldpsw.mask & PSW_MASK_PSTATE) && 1598 (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) && 1599 (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) { 1600 unmanageable_intercept(cpu, "operation exception loop", 1601 offsetof(LowCore, program_new_psw)); 1602 return EXCP_HALTED; 1603 } 1604 return 0; 1605 } 1606 1607 static int handle_intercept(S390CPU *cpu) 1608 { 1609 CPUState *cs = CPU(cpu); 1610 struct kvm_run *run = cs->kvm_run; 1611 int icpt_code = run->s390_sieic.icptcode; 1612 int r = 0; 1613 1614 DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code, 1615 (long)cs->kvm_run->psw_addr); 1616 switch (icpt_code) { 1617 case ICPT_INSTRUCTION: 1618 r = handle_instruction(cpu, run); 1619 break; 1620 case ICPT_PROGRAM: 1621 unmanageable_intercept(cpu, "program interrupt", 1622 offsetof(LowCore, program_new_psw)); 1623 r = EXCP_HALTED; 1624 break; 1625 case ICPT_EXT_INT: 1626 unmanageable_intercept(cpu, "external interrupt", 1627 offsetof(LowCore, external_new_psw)); 1628 r = EXCP_HALTED; 1629 break; 1630 case ICPT_WAITPSW: 1631 /* disabled wait, since enabled wait is handled in kernel */ 1632 cpu_synchronize_state(cs); 1633 s390_handle_wait(cpu); 1634 r = EXCP_HALTED; 1635 break; 1636 case ICPT_CPU_STOP: 1637 do_stop_interrupt(&cpu->env); 1638 r = EXCP_HALTED; 1639 break; 1640 case ICPT_OPEREXC: 1641 /* check for break points */ 1642 r = handle_sw_breakpoint(cpu, run); 1643 if (r == -ENOENT) { 1644 /* Then check for potential pgm check loops */ 1645 r = handle_oper_loop(cpu, run); 1646 if (r == 0) { 1647 kvm_s390_program_interrupt(cpu, PGM_OPERATION); 1648 } 1649 } 1650 break; 1651 case ICPT_SOFT_INTERCEPT: 1652 fprintf(stderr, "KVM unimplemented icpt SOFT\n"); 1653 exit(1); 1654 break; 1655 case ICPT_IO: 1656 fprintf(stderr, "KVM unimplemented icpt IO\n"); 1657 exit(1); 1658 break; 1659 default: 1660 fprintf(stderr, "Unknown intercept code: %d\n", icpt_code); 1661 exit(1); 1662 break; 1663 } 1664 1665 return r; 1666 } 1667 1668 static int handle_tsch(S390CPU *cpu) 1669 { 1670 CPUState *cs = CPU(cpu); 1671 struct kvm_run *run = cs->kvm_run; 1672 int ret; 1673 1674 cpu_synchronize_state(cs); 1675 1676 ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb, 1677 RA_IGNORED); 1678 if (ret < 0) { 1679 /* 1680 * Failure. 1681 * If an I/O interrupt had been dequeued, we have to reinject it. 1682 */ 1683 if (run->s390_tsch.dequeued) { 1684 kvm_s390_io_interrupt(run->s390_tsch.subchannel_id, 1685 run->s390_tsch.subchannel_nr, 1686 run->s390_tsch.io_int_parm, 1687 run->s390_tsch.io_int_word); 1688 } 1689 ret = 0; 1690 } 1691 return ret; 1692 } 1693 1694 static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar) 1695 { 1696 struct sysib_322 sysib; 1697 int del; 1698 1699 if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) { 1700 return; 1701 } 1702 /* Shift the stack of Extended Names to prepare for our own data */ 1703 memmove(&sysib.ext_names[1], &sysib.ext_names[0], 1704 sizeof(sysib.ext_names[0]) * (sysib.count - 1)); 1705 /* First virt level, that doesn't provide Ext Names delimits stack. It is 1706 * assumed it's not capable of managing Extended Names for lower levels. 1707 */ 1708 for (del = 1; del < sysib.count; del++) { 1709 if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) { 1710 break; 1711 } 1712 } 1713 if (del < sysib.count) { 1714 memset(sysib.ext_names[del], 0, 1715 sizeof(sysib.ext_names[0]) * (sysib.count - del)); 1716 } 1717 /* Insert short machine name in EBCDIC, padded with blanks */ 1718 if (qemu_name) { 1719 memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name)); 1720 ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name), 1721 strlen(qemu_name))); 1722 } 1723 sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */ 1724 memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0])); 1725 /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's 1726 * considered by s390 as not capable of providing any Extended Name. 1727 * Therefore if no name was specified on qemu invocation, we go with the 1728 * same "KVMguest" default, which KVM has filled into short name field. 1729 */ 1730 if (qemu_name) { 1731 strncpy((char *)sysib.ext_names[0], qemu_name, 1732 sizeof(sysib.ext_names[0])); 1733 } else { 1734 strcpy((char *)sysib.ext_names[0], "KVMguest"); 1735 } 1736 /* Insert UUID */ 1737 memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid)); 1738 1739 s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib)); 1740 } 1741 1742 static int handle_stsi(S390CPU *cpu) 1743 { 1744 CPUState *cs = CPU(cpu); 1745 struct kvm_run *run = cs->kvm_run; 1746 1747 switch (run->s390_stsi.fc) { 1748 case 3: 1749 if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) { 1750 return 0; 1751 } 1752 /* Only sysib 3.2.2 needs post-handling for now. */ 1753 insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar); 1754 return 0; 1755 default: 1756 return 0; 1757 } 1758 } 1759 1760 static int kvm_arch_handle_debug_exit(S390CPU *cpu) 1761 { 1762 CPUState *cs = CPU(cpu); 1763 struct kvm_run *run = cs->kvm_run; 1764 1765 int ret = 0; 1766 struct kvm_debug_exit_arch *arch_info = &run->debug.arch; 1767 1768 switch (arch_info->type) { 1769 case KVM_HW_WP_WRITE: 1770 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { 1771 cs->watchpoint_hit = &hw_watchpoint; 1772 hw_watchpoint.vaddr = arch_info->addr; 1773 hw_watchpoint.flags = BP_MEM_WRITE; 1774 ret = EXCP_DEBUG; 1775 } 1776 break; 1777 case KVM_HW_BP: 1778 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { 1779 ret = EXCP_DEBUG; 1780 } 1781 break; 1782 case KVM_SINGLESTEP: 1783 if (cs->singlestep_enabled) { 1784 ret = EXCP_DEBUG; 1785 } 1786 break; 1787 default: 1788 ret = -ENOSYS; 1789 } 1790 1791 return ret; 1792 } 1793 1794 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) 1795 { 1796 S390CPU *cpu = S390_CPU(cs); 1797 int ret = 0; 1798 1799 qemu_mutex_lock_iothread(); 1800 1801 switch (run->exit_reason) { 1802 case KVM_EXIT_S390_SIEIC: 1803 ret = handle_intercept(cpu); 1804 break; 1805 case KVM_EXIT_S390_RESET: 1806 s390_reipl_request(); 1807 break; 1808 case KVM_EXIT_S390_TSCH: 1809 ret = handle_tsch(cpu); 1810 break; 1811 case KVM_EXIT_S390_STSI: 1812 ret = handle_stsi(cpu); 1813 break; 1814 case KVM_EXIT_DEBUG: 1815 ret = kvm_arch_handle_debug_exit(cpu); 1816 break; 1817 default: 1818 fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); 1819 break; 1820 } 1821 qemu_mutex_unlock_iothread(); 1822 1823 if (ret == 0) { 1824 ret = EXCP_INTERRUPT; 1825 } 1826 return ret; 1827 } 1828 1829 bool kvm_arch_stop_on_emulation_error(CPUState *cpu) 1830 { 1831 return true; 1832 } 1833 1834 void kvm_s390_io_interrupt(uint16_t subchannel_id, 1835 uint16_t subchannel_nr, uint32_t io_int_parm, 1836 uint32_t io_int_word) 1837 { 1838 struct kvm_s390_irq irq = { 1839 .u.io.subchannel_id = subchannel_id, 1840 .u.io.subchannel_nr = subchannel_nr, 1841 .u.io.io_int_parm = io_int_parm, 1842 .u.io.io_int_word = io_int_word, 1843 }; 1844 1845 if (io_int_word & IO_INT_WORD_AI) { 1846 irq.type = KVM_S390_INT_IO(1, 0, 0, 0); 1847 } else { 1848 irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8, 1849 (subchannel_id & 0x0006), 1850 subchannel_nr); 1851 } 1852 kvm_s390_floating_interrupt(&irq); 1853 } 1854 1855 void kvm_s390_crw_mchk(void) 1856 { 1857 struct kvm_s390_irq irq = { 1858 .type = KVM_S390_MCHK, 1859 .u.mchk.cr14 = CR14_CHANNEL_REPORT_SC, 1860 .u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP, 1861 }; 1862 kvm_s390_floating_interrupt(&irq); 1863 } 1864 1865 void kvm_s390_enable_css_support(S390CPU *cpu) 1866 { 1867 int r; 1868 1869 /* Activate host kernel channel subsystem support. */ 1870 r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0); 1871 assert(r == 0); 1872 } 1873 1874 void kvm_arch_init_irq_routing(KVMState *s) 1875 { 1876 /* 1877 * Note that while irqchip capabilities generally imply that cpustates 1878 * are handled in-kernel, it is not true for s390 (yet); therefore, we 1879 * have to override the common code kvm_halt_in_kernel_allowed setting. 1880 */ 1881 if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { 1882 kvm_gsi_routing_allowed = true; 1883 kvm_halt_in_kernel_allowed = false; 1884 } 1885 } 1886 1887 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, 1888 int vq, bool assign) 1889 { 1890 struct kvm_ioeventfd kick = { 1891 .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY | 1892 KVM_IOEVENTFD_FLAG_DATAMATCH, 1893 .fd = event_notifier_get_fd(notifier), 1894 .datamatch = vq, 1895 .addr = sch, 1896 .len = 8, 1897 }; 1898 if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) { 1899 return -ENOSYS; 1900 } 1901 if (!assign) { 1902 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; 1903 } 1904 return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); 1905 } 1906 1907 int kvm_s390_get_memslot_count(void) 1908 { 1909 return kvm_check_extension(kvm_state, KVM_CAP_NR_MEMSLOTS); 1910 } 1911 1912 int kvm_s390_get_ri(void) 1913 { 1914 return cap_ri; 1915 } 1916 1917 int kvm_s390_get_gs(void) 1918 { 1919 return cap_gs; 1920 } 1921 1922 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state) 1923 { 1924 struct kvm_mp_state mp_state = {}; 1925 int ret; 1926 1927 /* the kvm part might not have been initialized yet */ 1928 if (CPU(cpu)->kvm_state == NULL) { 1929 return 0; 1930 } 1931 1932 switch (cpu_state) { 1933 case CPU_STATE_STOPPED: 1934 mp_state.mp_state = KVM_MP_STATE_STOPPED; 1935 break; 1936 case CPU_STATE_CHECK_STOP: 1937 mp_state.mp_state = KVM_MP_STATE_CHECK_STOP; 1938 break; 1939 case CPU_STATE_OPERATING: 1940 mp_state.mp_state = KVM_MP_STATE_OPERATING; 1941 break; 1942 case CPU_STATE_LOAD: 1943 mp_state.mp_state = KVM_MP_STATE_LOAD; 1944 break; 1945 default: 1946 error_report("Requested CPU state is not a valid S390 CPU state: %u", 1947 cpu_state); 1948 exit(1); 1949 } 1950 1951 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); 1952 if (ret) { 1953 trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state, 1954 strerror(-ret)); 1955 } 1956 1957 return ret; 1958 } 1959 1960 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu) 1961 { 1962 struct kvm_s390_irq_state irq_state = { 1963 .buf = (uint64_t) cpu->irqstate, 1964 .len = VCPU_IRQ_BUF_SIZE, 1965 }; 1966 CPUState *cs = CPU(cpu); 1967 int32_t bytes; 1968 1969 if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { 1970 return; 1971 } 1972 1973 bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state); 1974 if (bytes < 0) { 1975 cpu->irqstate_saved_size = 0; 1976 error_report("Migration of interrupt state failed"); 1977 return; 1978 } 1979 1980 cpu->irqstate_saved_size = bytes; 1981 } 1982 1983 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) 1984 { 1985 CPUState *cs = CPU(cpu); 1986 struct kvm_s390_irq_state irq_state = { 1987 .buf = (uint64_t) cpu->irqstate, 1988 .len = cpu->irqstate_saved_size, 1989 }; 1990 int r; 1991 1992 if (cpu->irqstate_saved_size == 0) { 1993 return 0; 1994 } 1995 1996 if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { 1997 return -ENOSYS; 1998 } 1999 2000 r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state); 2001 if (r) { 2002 error_report("Setting interrupt state failed %d", r); 2003 } 2004 return r; 2005 } 2006 2007 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, 2008 uint64_t address, uint32_t data, PCIDevice *dev) 2009 { 2010 S390PCIBusDevice *pbdev; 2011 uint32_t vec = data & ZPCI_MSI_VEC_MASK; 2012 2013 if (!dev) { 2014 DPRINTF("add_msi_route no pci device\n"); 2015 return -ENODEV; 2016 } 2017 2018 pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id); 2019 if (!pbdev) { 2020 DPRINTF("add_msi_route no zpci device\n"); 2021 return -ENODEV; 2022 } 2023 2024 route->type = KVM_IRQ_ROUTING_S390_ADAPTER; 2025 route->flags = 0; 2026 route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr; 2027 route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr; 2028 route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset; 2029 route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec; 2030 route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id; 2031 return 0; 2032 } 2033 2034 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, 2035 int vector, PCIDevice *dev) 2036 { 2037 return 0; 2038 } 2039 2040 int kvm_arch_release_virq_post(int virq) 2041 { 2042 return 0; 2043 } 2044 2045 int kvm_arch_msi_data_to_gsi(uint32_t data) 2046 { 2047 abort(); 2048 } 2049 2050 static int query_cpu_subfunc(S390FeatBitmap features) 2051 { 2052 struct kvm_s390_vm_cpu_subfunc prop; 2053 struct kvm_device_attr attr = { 2054 .group = KVM_S390_VM_CPU_MODEL, 2055 .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC, 2056 .addr = (uint64_t) &prop, 2057 }; 2058 int rc; 2059 2060 rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 2061 if (rc) { 2062 return rc; 2063 } 2064 2065 /* 2066 * We're going to add all subfunctions now, if the corresponding feature 2067 * is available that unlocks the query functions. 2068 */ 2069 s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); 2070 if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { 2071 s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); 2072 } 2073 if (test_bit(S390_FEAT_MSA, features)) { 2074 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); 2075 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); 2076 s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km); 2077 s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); 2078 s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); 2079 } 2080 if (test_bit(S390_FEAT_MSA_EXT_3, features)) { 2081 s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); 2082 } 2083 if (test_bit(S390_FEAT_MSA_EXT_4, features)) { 2084 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); 2085 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); 2086 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); 2087 s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); 2088 } 2089 if (test_bit(S390_FEAT_MSA_EXT_5, features)) { 2090 s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); 2091 } 2092 if (test_bit(S390_FEAT_MSA_EXT_8, features)) { 2093 s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); 2094 } 2095 return 0; 2096 } 2097 2098 static int configure_cpu_subfunc(const S390FeatBitmap features) 2099 { 2100 struct kvm_s390_vm_cpu_subfunc prop = {}; 2101 struct kvm_device_attr attr = { 2102 .group = KVM_S390_VM_CPU_MODEL, 2103 .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC, 2104 .addr = (uint64_t) &prop, 2105 }; 2106 2107 if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2108 KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) { 2109 /* hardware support might be missing, IBC will handle most of this */ 2110 return 0; 2111 } 2112 2113 s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo); 2114 if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) { 2115 s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff); 2116 } 2117 if (test_bit(S390_FEAT_MSA, features)) { 2118 s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac); 2119 s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc); 2120 s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km); 2121 s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd); 2122 s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd); 2123 } 2124 if (test_bit(S390_FEAT_MSA_EXT_3, features)) { 2125 s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo); 2126 } 2127 if (test_bit(S390_FEAT_MSA_EXT_4, features)) { 2128 s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr); 2129 s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf); 2130 s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo); 2131 s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc); 2132 } 2133 if (test_bit(S390_FEAT_MSA_EXT_5, features)) { 2134 s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno); 2135 } 2136 if (test_bit(S390_FEAT_MSA_EXT_8, features)) { 2137 s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma); 2138 } 2139 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 2140 } 2141 2142 static int kvm_to_feat[][2] = { 2143 { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP }, 2144 { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 }, 2145 { KVM_S390_VM_CPU_FEAT_64BSCAO , S390_FEAT_SIE_64BSCAO }, 2146 { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF }, 2147 { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE }, 2148 { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS }, 2149 { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB }, 2150 { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI }, 2151 { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS }, 2152 { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY }, 2153 { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA }, 2154 { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI}, 2155 { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF}, 2156 { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS}, 2157 }; 2158 2159 static int query_cpu_feat(S390FeatBitmap features) 2160 { 2161 struct kvm_s390_vm_cpu_feat prop; 2162 struct kvm_device_attr attr = { 2163 .group = KVM_S390_VM_CPU_MODEL, 2164 .attr = KVM_S390_VM_CPU_MACHINE_FEAT, 2165 .addr = (uint64_t) &prop, 2166 }; 2167 int rc; 2168 int i; 2169 2170 rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 2171 if (rc) { 2172 return rc; 2173 } 2174 2175 for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { 2176 if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) { 2177 set_bit(kvm_to_feat[i][1], features); 2178 } 2179 } 2180 return 0; 2181 } 2182 2183 static int configure_cpu_feat(const S390FeatBitmap features) 2184 { 2185 struct kvm_s390_vm_cpu_feat prop = {}; 2186 struct kvm_device_attr attr = { 2187 .group = KVM_S390_VM_CPU_MODEL, 2188 .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT, 2189 .addr = (uint64_t) &prop, 2190 }; 2191 int i; 2192 2193 for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) { 2194 if (test_bit(kvm_to_feat[i][1], features)) { 2195 set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat); 2196 } 2197 } 2198 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 2199 } 2200 2201 bool kvm_s390_cpu_models_supported(void) 2202 { 2203 if (!cpu_model_allowed()) { 2204 /* compatibility machines interfere with the cpu model */ 2205 return false; 2206 } 2207 return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2208 KVM_S390_VM_CPU_MACHINE) && 2209 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2210 KVM_S390_VM_CPU_PROCESSOR) && 2211 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2212 KVM_S390_VM_CPU_MACHINE_FEAT) && 2213 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2214 KVM_S390_VM_CPU_PROCESSOR_FEAT) && 2215 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL, 2216 KVM_S390_VM_CPU_MACHINE_SUBFUNC); 2217 } 2218 2219 void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp) 2220 { 2221 struct kvm_s390_vm_cpu_machine prop = {}; 2222 struct kvm_device_attr attr = { 2223 .group = KVM_S390_VM_CPU_MODEL, 2224 .attr = KVM_S390_VM_CPU_MACHINE, 2225 .addr = (uint64_t) &prop, 2226 }; 2227 uint16_t unblocked_ibc = 0, cpu_type = 0; 2228 int rc; 2229 2230 memset(model, 0, sizeof(*model)); 2231 2232 if (!kvm_s390_cpu_models_supported()) { 2233 error_setg(errp, "KVM doesn't support CPU models"); 2234 return; 2235 } 2236 2237 /* query the basic cpu model properties */ 2238 rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr); 2239 if (rc) { 2240 error_setg(errp, "KVM: Error querying host CPU model: %d", rc); 2241 return; 2242 } 2243 2244 cpu_type = cpuid_type(prop.cpuid); 2245 if (has_ibc(prop.ibc)) { 2246 model->lowest_ibc = lowest_ibc(prop.ibc); 2247 unblocked_ibc = unblocked_ibc(prop.ibc); 2248 } 2249 model->cpu_id = cpuid_id(prop.cpuid); 2250 model->cpu_id_format = cpuid_format(prop.cpuid); 2251 model->cpu_ver = 0xff; 2252 2253 /* get supported cpu features indicated via STFL(E) */ 2254 s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL, 2255 (uint8_t *) prop.fac_mask); 2256 /* dat-enhancement facility 2 has no bit but was introduced with stfle */ 2257 if (test_bit(S390_FEAT_STFLE, model->features)) { 2258 set_bit(S390_FEAT_DAT_ENH_2, model->features); 2259 } 2260 /* get supported cpu features indicated e.g. via SCLP */ 2261 rc = query_cpu_feat(model->features); 2262 if (rc) { 2263 error_setg(errp, "KVM: Error querying CPU features: %d", rc); 2264 return; 2265 } 2266 /* get supported cpu subfunctions indicated via query / test bit */ 2267 rc = query_cpu_subfunc(model->features); 2268 if (rc) { 2269 error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc); 2270 return; 2271 } 2272 2273 /* with cpu model support, CMM is only indicated if really available */ 2274 if (kvm_s390_cmma_available()) { 2275 set_bit(S390_FEAT_CMM, model->features); 2276 } else { 2277 /* no cmm -> no cmm nt */ 2278 clear_bit(S390_FEAT_CMM_NT, model->features); 2279 } 2280 2281 /* We emulate a zPCI bus and AEN, therefore we don't need HW support */ 2282 if (pci_available) { 2283 set_bit(S390_FEAT_ZPCI, model->features); 2284 } 2285 set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features); 2286 2287 if (s390_known_cpu_type(cpu_type)) { 2288 /* we want the exact model, even if some features are missing */ 2289 model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc), 2290 ibc_ec_ga(unblocked_ibc), NULL); 2291 } else { 2292 /* model unknown, e.g. too new - search using features */ 2293 model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc), 2294 ibc_ec_ga(unblocked_ibc), 2295 model->features); 2296 } 2297 if (!model->def) { 2298 error_setg(errp, "KVM: host CPU model could not be identified"); 2299 return; 2300 } 2301 /* strip of features that are not part of the maximum model */ 2302 bitmap_and(model->features, model->features, model->def->full_feat, 2303 S390_FEAT_MAX); 2304 } 2305 2306 void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp) 2307 { 2308 struct kvm_s390_vm_cpu_processor prop = { 2309 .fac_list = { 0 }, 2310 }; 2311 struct kvm_device_attr attr = { 2312 .group = KVM_S390_VM_CPU_MODEL, 2313 .attr = KVM_S390_VM_CPU_PROCESSOR, 2314 .addr = (uint64_t) &prop, 2315 }; 2316 int rc; 2317 2318 if (!model) { 2319 /* compatibility handling if cpu models are disabled */ 2320 if (kvm_s390_cmma_available()) { 2321 kvm_s390_enable_cmma(); 2322 } 2323 return; 2324 } 2325 if (!kvm_s390_cpu_models_supported()) { 2326 error_setg(errp, "KVM doesn't support CPU models"); 2327 return; 2328 } 2329 prop.cpuid = s390_cpuid_from_cpu_model(model); 2330 prop.ibc = s390_ibc_from_cpu_model(model); 2331 /* configure cpu features indicated via STFL(e) */ 2332 s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL, 2333 (uint8_t *) prop.fac_list); 2334 rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr); 2335 if (rc) { 2336 error_setg(errp, "KVM: Error configuring the CPU model: %d", rc); 2337 return; 2338 } 2339 /* configure cpu features indicated e.g. via SCLP */ 2340 rc = configure_cpu_feat(model->features); 2341 if (rc) { 2342 error_setg(errp, "KVM: Error configuring CPU features: %d", rc); 2343 return; 2344 } 2345 /* configure cpu subfunctions indicated via query / test bit */ 2346 rc = configure_cpu_subfunc(model->features); 2347 if (rc) { 2348 error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc); 2349 return; 2350 } 2351 /* enable CMM via CMMA */ 2352 if (test_bit(S390_FEAT_CMM, model->features)) { 2353 kvm_s390_enable_cmma(); 2354 } 2355 } 2356 2357 void kvm_s390_restart_interrupt(S390CPU *cpu) 2358 { 2359 struct kvm_s390_irq irq = { 2360 .type = KVM_S390_RESTART, 2361 }; 2362 2363 kvm_s390_vcpu_interrupt(cpu, &irq); 2364 } 2365 2366 void kvm_s390_stop_interrupt(S390CPU *cpu) 2367 { 2368 struct kvm_s390_irq irq = { 2369 .type = KVM_S390_SIGP_STOP, 2370 }; 2371 2372 kvm_s390_vcpu_interrupt(cpu, &irq); 2373 } 2374